Akjava committed on
Commit
105fc52
Β·
verified Β·
1 Parent(s): 6c228f1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -210,7 +210,7 @@ def trans(text):
210
  return None
211
 
212
  # γƒ†γ‚­γ‚Ήγƒˆγ«θ¨€θͺžγ‚Ώγ‚°γ‚’δ»˜δΈŽγ—γ€γƒγ‚€γƒˆεˆ—γ«ε€‰ζ›
213
- input_text = bf"<2ja>{text}"
214
 
215
  # γƒˆγƒΌγ‚―γƒŠγ‚€γ‚Ί
216
  tokens = llm.tokenize(input_text)
@@ -284,8 +284,8 @@ def respond(
284
  repeat_penalty: float,
285
  ):
286
  llama = Llama("models/madlad400-3b-mt-q8_0.gguf")
287
- #tokens = llama.tokenize(bf"<2ja>{message}")#
288
- tokens = llama.tokenize(b"What is the capital of France?")
289
  llama.encode(tokens)
290
  tokens = [llama.decoder_start_token()]
291
  for token in llama.generate(tokens, top_k=40, top_p=0.95, temp=1, repeat_penalty=1.0):
 
210
  return None
211
 
212
  # γƒ†γ‚­γ‚Ήγƒˆγ«θ¨€θͺžγ‚Ώγ‚°γ‚’δ»˜δΈŽγ—γ€γƒγ‚€γƒˆεˆ—γ«ε€‰ζ›
213
+ input_text = f"<2ja>{text}"
214
 
215
  # γƒˆγƒΌγ‚―γƒŠγ‚€γ‚Ί
216
  tokens = llm.tokenize(input_text)
 
284
  repeat_penalty: float,
285
  ):
286
  llama = Llama("models/madlad400-3b-mt-q8_0.gguf")
287
+ #tokens = llama.tokenize(f"<2ja>{message}")#
288
+ tokens = llama.tokenize(f"What is the capital of France?".encode("utf-8"))
289
  llama.encode(tokens)
290
  tokens = [llama.decoder_start_token()]
291
  for token in llama.generate(tokens, top_k=40, top_p=0.95, temp=1, repeat_penalty=1.0):