File size: 4,281 Bytes
32cd2af
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
import json
import requests
import gradio as gr
import random
import time
import os
import datetime
from datetime import datetime

print('for  update')  # keep-alive marker; edited to trigger Space rebuilds

# Credentials come from the environment; both must be set for the app to work.
API_TOKEN = os.getenv("API_TOKEN")
DECODEM_TOKEN = os.getenv("DECODEM_TOKEN")


from huggingface_hub import InferenceApi
inference = InferenceApi("bigscience/bloom", token=API_TOKEN)

headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
url_decodemprompts = 'https://us-central1-createinsightsproject.cloudfunctions.net/getdecodemprompts'

data = {"prompt_type": 'ad_text_prompt', "decodem_token": DECODEM_TOKEN}

# Fetch the base prompt once at startup.
# BUG FIX: the original referenced `r` unconditionally after the try/except,
# so any request failure raised NameError at import time; it also caught only
# ReadTimeout and had no timeout at all.  Default to an empty prompt on
# failure and catch all request-level errors instead.
prompt_text = ''
try:
    r = requests.post(url_decodemprompts, data=json.dumps(data),
                      headers=headers, timeout=30)
    r.raise_for_status()
    prompt_text = str(r.content, 'UTF-8')
except requests.exceptions.RequestException as e:
    print(e)
print(prompt_text)

def infer(prompt,
          max_length = 250,
          top_k = 0,
          num_beams = 0,
          no_repeat_ngram_size = 2,
          top_p = 0.9,
          seed=42,
          temperature=0.7,
          greedy_decoding = False,
          return_full_text = False):
    """Query the BLOOM inference endpoint and return its raw response.

    Normalises the decoding knobs into the shape the HF Inference API
    expects: beam search disables sampling and nucleus filtering, greedy
    decoding disables beams, and zero-valued knobs are sent as None.
    """
    print(seed)

    use_beams = num_beams > 0
    # Sampling is on only when neither beam search nor greedy decoding is requested.
    do_sample = not (use_beams or greedy_decoding)
    if greedy_decoding or not use_beams:
        num_beams = None
    if top_k == 0:
        top_k = None
    if num_beams is None:
        # No beam search: the beam-only knobs are irrelevant.
        no_repeat_ngram_size = None
        early_stopping = None
    else:
        # Beam search: nucleus filtering is irrelevant; stop beams early.
        top_p = None
        early_stopping = num_beams > 0

    params = {
        "max_new_tokens": max_length,
        "top_k": top_k,
        "top_p": top_p,
        "temperature": temperature,
        "do_sample": do_sample,
        "seed": seed,
        "early_stopping": early_stopping,
        "no_repeat_ngram_size": no_repeat_ngram_size,
        "num_beams": num_beams,
        "return_full_text": return_full_text,
    }

    started = time.time()
    result = inference(prompt, params=params)
    #print(result)
    elapsed = time.time() - started
    #print(f"Processing time was {elapsed} seconds")
    return result

def _load_nsfw_words():
    """Download the public profanity list and return it lower-cased,
    plus a few extra terms blocked for this app."""
    response_nsfw = requests.get('https://github.com/coffee-and-fun/google-profanity-words/raw/main/data/list.txt')
    nsfwlowerlist = [w.lower() for w in response_nsfw.text.split('\n') if w != '']
    nsfwlowerlist.extend(['bra', 'gay', 'lesbian', ])
    return nsfwlowerlist


def getadline(text_inp):
    """Generate ad-line text for *text_inp* via BLOOM and filter unsafe output.

    Returns the generated text (first three lines at most), or a fixed
    warning message when a blocked word appears in either the input or
    the generated output.
    """
    print(text_inp)
    print(datetime.today().strftime("%d-%m-%Y"))

    # BUG FIX: the original referenced an undefined name `prompt`;
    # the module-level variable holding the fetched prompt is `prompt_text`.
    text = prompt_text + "\nInput:" + text_inp + "\nOutput:"
    resp = infer(text, seed=random.randint(0, 100))

    generated_text = resp[0]['generated_text']
    # Strip the echoed prompt and any trailing "###"-separated continuation.
    result = generated_text.replace(text, '').strip()
    result = result.replace("Output:", "")
    topic = result.split("###")[0].strip()
    topic = "\n".join(topic.split('\n')[:3])
    print(topic)

    nsfwlowerlist = _load_nsfw_words()
    # BUG FIX: compare against the lower-cased input as well — the original
    # lowered only the generated text, so mixed-case input words slipped
    # past the filter.
    haystacks = (topic.lower(), text_inp.lower())
    foundnsfw = any(word in hay for word in nsfwlowerlist for hay in haystacks)
    if foundnsfw:
        topic = "Unsafe content found. Please try again with different prompts."
        print(topic)
    return(topic)

# Gradio UI: one input textbox, a Generate button, and a text output wired
# to the generator function defined above.
with gr.Blocks() as demo:
    gr.Markdown("<h1><center>Market Sizing Framework for Your Business</center></h1>")
    gr.Markdown(
        """ChatGPT based Insights from <a href="https://www.decodem.ai">Decodem.ai</a> for businesses.\nWhile ChatGPT has multiple use cases we have evolved specific use cases/ templates for businesses \n\n This template provides ideas on how a business can size a market they are entering. Enter a business area to size and get the results. Use examples as a guide. We use a equally powerful AI model bigscience/bloom."""
        )
    textbox = gr.Textbox(placeholder="Enter market size focus for business here...", lines=1,label='Your business area')
    btn = gr.Button("Generate")
    # BUG FIX: getadline returns a string, so the output must be a Textbox —
    # the Image component previously wired here could never render it.
    output1 = gr.Textbox(lines=2,label='Market Sizing Framework')

    # BUG FIX: the click handler referenced undefined `getideas`; the
    # generator defined in this file is `getadline`.
    btn.click(getadline,inputs=[textbox], outputs=[output1])
    examples = gr.Examples(examples=['ice cream parlor in London','HR saas for fintech','book shops in NYC','Starbucks cafe in Bangalore','organic vegetables via ecommerce','grocery delivery'],
                           inputs=[textbox])


demo.launch()