Update app.py
app.py CHANGED
@@ -52,7 +52,12 @@ def generate(prompt, history, system_prompt, temperature=0.9, max_new_tokens=256
     formatted_prompt = format_prompt(f"{system_prompt} {prompt}", history)
     #formatted_prompt = format_prompt_grammar(f"Corrected Sentence: {prompt}", history)
     print("\nPROMPT: \n\t" + formatted_prompt)
-
+
+    stream1 = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
+    stream2 = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=False, return_full_text=False)
+    print(f">> STREAM1 - '{stream1}'")
+    print(f">> STREAM2 - '{stream2}'")
+    # Generate text from the HF inference
     stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
     output = ""

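Note: with stream=True, text_generation returns a generator, so the two new print statements will show generator objects rather than generated text. Below is a minimal sketch of how such a stream is typically consumed, assuming client is a huggingface_hub InferenceClient; the model id and the stream_to_text helper are placeholders for illustration, not taken from this diff.

from huggingface_hub import InferenceClient

# Placeholder model id; the Space's actual endpoint/model is not shown in this diff.
client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.1")

def stream_to_text(formatted_prompt, **generate_kwargs):
    # With stream=True and details=True, each item is a TextGenerationStreamOutput
    # whose generated text lives in response.token.text.
    stream = client.text_generation(
        formatted_prompt,
        **generate_kwargs,
        stream=True,
        details=True,
        return_full_text=False,
    )
    output = ""
    for response in stream:
        output += response.token.text
    return output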