import gradio as gr
import requests
import os

# Bloom Inference API
API_URL = "https://api-inference.huggingface.co/models/bigscience/bloom"

headers = {"Authorization": f"Bearer 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"}
prompt_sep = '😃'


def text_generate(prompt, top_p=0.8, top_k=100, temperature=1.0, num_beams=3, repetition_penalty=3.0): 
  # Debug prints to trace the request/response cycle.
  print(f"*****Inside text_generate - Prompt is: {prompt}")
  max_tokens = 250     # tokens to generate per call
  max_prompt_len = 50  # only the last 50 characters of the prompt are sent to the API
  json_ = {"inputs": prompt[-max_prompt_len:],
            "parameters":
            {
            "top_p": float(top_p),
            "top_k": top_k,
          "temperature": float(temperature),
          "max_new_tokens": max_tokens,
          "return_full_text": True,
          "do_sample":True,
          "num_beams": num_beams,
          "repetition_penalty": float(repetition_penalty),
          }, 
          "options": 
          {"use_cache": True,
          "wait_for_model": True,
          },}
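  # For reference: a successful call returns a list like [{"generated_text": "<prompt + continuation>"}],
  # while failures (invalid token, model still loading, etc.) come back as a dict with an "error" key.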
  print(f"Gen params is: {json_}")
  response = requests.post(API_URL, headers=headers, json=json_)
  print(f"Response  is : {response}")
  output = response.json()
  print(f"output is : {output}") 
  output_tmp = output[0]['generated_text']
  print(f"output_tmp is: {output_tmp}")
  # Bloom sometimes drifts into Q:/Output:-style turns; keep only the text before them.
  solution = output_tmp.split("\nQ:")[0]
  print(f"Final response after splits is: {solution}")
  if '\nOutput:' in solution:
    final_solution = solution.split("\nOutput:")[0]
    print(f"Response after removing the Output: marker is: {final_solution}")
  else:
    final_solution = solution

  # Re-attach the portion of the prompt that was truncated away before the API call,
  # with prompt_sep marking where Bloom's actual input began.
  final_solution = prompt[:max(0, len(prompt) - max_prompt_len)].replace(prompt_sep, '') + prompt_sep + final_solution.replace(prompt_sep, '')
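  # Worked example of the line above: with a 120-character prompt and max_prompt_len=50,
  # characters 0-69 are kept verbatim, prompt_sep is inserted, and Bloom only ever saw
  # (and continued from) characters 70-119.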
  
  return final_solution
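
# Optional smoke test of text_generate outside the UI (a sketch, not part of the original app;
# RUN_SMOKE_TEST is a hypothetical opt-in flag, and a valid HF_TOKEN must be set in the environment):
if __name__ == "__main__" and os.environ.get("RUN_SMOKE_TEST"):
  print(text_generate("Once upon a time, a small robot woke up alone."))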


demo = gr.Blocks()

# Test prompts (each English prompt is followed by its Chinese translation, for trying Bloom's multilingual generation):
# Mike and John are fighting in a war. A monster caught John. John shouted: “Help!” Mike ran to him and saved him, but Mike was killed by the monster
# 迈克和约翰正在打仗。一个怪物抓住了约翰。约翰喊道:“救命!”迈克跑向他,救了他,但迈克被怪物杀了
# Mike and John are fighting in a war. A monster caught John. John shouted: “Help!” Mike ran to him and saved him, but Mike was killed by the monster. John looked at Mike
# 迈克和约翰正在打仗。一个怪物抓住了约翰。约翰喊道:“救命!”迈克跑向他,救了他,但迈克被怪物杀了。约翰看着迈克

with demo:
  gr.Markdown("<h1><center>Write Stories Using Bloom</center></h1>")
  gr.Markdown(
        """Forked form https://huggingface.co/spaces/EuroPython2022/Write-Stories-Using-Bloom. \n Bloom is a model by [HuggingFace](https://huggingface.co/bigscience/bloom) and a team of more than 1000 researchers coming together as [BigScienceW Bloom](https://twitter.com/BigscienceW).\n\nLarge language models have demonstrated a capability of producing coherent sentences and given a context we can pretty much decide the *theme* of generated text.\n\nHow to Use this App: Use the sample text given as prompt or type in a new prompt as a starting point of your awesome story! Just keep pressing the 'Generate Text' Button and go crazy!\n\nHow this App works: This app operates by feeding back the text generated by Bloom to itself as a Prompt for next generation round and so on. Currently, due to size-limits on Prompt and Token generation, we are only able to feed very limited-length text as Prompt and are getting very few tokens generated in-turn. This makes it difficult to keep a tab on theme of text generation, so please bear with that. In summary, I believe it is a nice little fun App which you can play with for a while.\n\nThis Space is created by [Yuvraj Sharma](https://twitter.com/yvrjsharma) for EuroPython 2022 Demo."""
        )
  with gr.Row():
    input_prompt = gr.Textbox(label=f"Write some text to get started... (text after {prompt_sep} is the truncated prompt inputted to Bloom)", lines=3, value="Dear human philosophers, I read your comments on my abilities and limitations with great interest.")  
    
  with gr.Row():
    top_p = gr.Slider(label="top_p", minimum=0., maximum=1.0, value=0.8, step=0.1, visible = True)
  with gr.Row():
    top_k = gr.Slider(label="top_k", minimum=1, maximum=500, value=100, step=20, visible = True)
  with gr.Row():
    temperature = gr.Slider(label="temperature", minimum=0., maximum=2.0, value=1.0, step=0.1, visible = True)
  with gr.Row():
    num_beams = gr.Slider(label="num_beams", minimum=1, maximum=6, value=3, step=1, visible = True)
  with gr.Row():
    repetition_penalty = gr.Slider(label="repetition_penalty", minimum=1.0, maximum=6.0, value=3.0, step=1.0, visible = True)
  
  b1 = gr.Button("Generate Your Story")

  b1.click(text_generate, inputs=[input_prompt, top_p, top_k, temperature, num_beams, repetition_penalty], outputs=[input_prompt]) 

demo.launch(enable_queue=True, debug=True)
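
# To run locally (assuming this file is saved as app.py):
#   HF_TOKEN=<your-hugging-face-token> python app.py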