Spaces: Running on Zero
Update app.py
Browse files
app.py
CHANGED
@@ -61,25 +61,20 @@ def translate(
|
|
61 |
print(f'Text is - {source_text}')
|
62 |
|
63 |
prompt = Prompt_template(source_text, source_lang, target_lang)
|
64 |
-
input_ids = tokenizer(prompt, return_tensors="pt").
|
65 |
|
66 |
-
streamer = TextIteratorStreamer(tokenizer, **{"skip_special_tokens": True, "skip_prompt": True, 'clean_up_tokenization_spaces':False,})
|
67 |
-
|
68 |
generate_kwargs = dict(
|
69 |
input_ids=input_ids,
|
70 |
-
streamer=streamer,
|
71 |
max_length=max_length,
|
72 |
do_sample=True,
|
73 |
temperature=temperature,
|
74 |
)
|
75 |
-
|
76 |
-
thread = Thread(target=model.generate, kwargs=generate_kwargs)
|
77 |
-
thread.start()
|
78 |
|
79 |
-
|
80 |
-
|
81 |
-
|
82 |
-
|
|
|
83 |
|
84 |
CSS = """
|
85 |
h1 {
|
|
|
61 |
print(f'Text is - {source_text}')
|
62 |
|
63 |
prompt = Prompt_template(source_text, source_lang, target_lang)
|
64 |
+
input_ids = tokenizer(prompt, return_tensors="pt").input_ids
|
65 |
|
|
|
|
|
66 |
generate_kwargs = dict(
|
67 |
input_ids=input_ids,
|
|
|
68 |
max_length=max_length,
|
69 |
do_sample=True,
|
70 |
temperature=temperature,
|
71 |
)
|
|
|
|
|
|
|
72 |
|
73 |
+
generate_ids = model.generate(**generate_kwargs)
|
74 |
+
|
75 |
+
resp = tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
|
76 |
+
|
77 |
+
return resp
|
78 |
|
79 |
CSS = """
|
80 |
h1 {
|