abhishekt committed on
Commit d43d27c · 1 Parent(s): de6764a
Files changed (1)
  1. app.py +30 -7
app.py CHANGED
@@ -5,19 +5,42 @@ import transformers
 
 openai.api_key = os.environ["api"]
 
-generator = transformers.pipeline("text-generation", model="EleutherAI/gpt-neo-2.7B")
 
 def generateBlogTopics(prompt1):
-    response = generator("Generate blog topics on: {}. \n \n 1. ".format(prompt1), max_length=100, do_sample=True, temperature=0.7)[0]["generated_text"]
-    return response
+    response = openai.Completion.create(
+        engine="text-davinci-002",
+        prompt="Generate blog topics on: {}. \n \n 1. ".format(prompt1),
+        max_tokens=100,
+        n=1,
+        stop=None,
+        temperature=0.7,
+    )
+
+    return response.choices[0].text
 
 def generateBlogSections(prompt1):
-    response = generator("Expand the blog title in to high level blog sections: {} \n\n- Introduction: ".format(prompt1), max_length=100, do_sample=True, temperature=0.6)[0]["generated_text"]
-    return response
+    response = openai.Completion.create(
+        engine="text-davinci-002",
+        prompt="Expand the blog title in to high level blog sections: {} \n\n- Introduction: ".format(prompt1),
+        max_tokens=100,
+        n=1,
+        stop=None,
+        temperature=0.6,
+    )
+
+    return response.choices[0].text
 
 def blogSectionExpander(prompt1):
-    response = generator("Expand the blog section in to a detailed professional, witty and clever explanation.\n\n {}".format(prompt1), max_length=400, do_sample=True, temperature=0.7)[0]["generated_text"]
-    return response
+    response = openai.Completion.create(
+        engine="text-davinci-002",
+        prompt="Expand the blog section in to a detailed professional, witty and clever explanation.\n\n {}".format(prompt1),
+        max_tokens=400,
+        n=1,
+        stop=None,
+        temperature=0.7,
+    )
+
+    return response.choices[0].text
 
 input_text = gr.inputs.Textbox(lines=5, label="Enter prompt text here:")
 
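The hunk ends at the Textbox input, so the Gradio wiring itself is not visible in this diff. Below is a minimal sketch of how the rest of app.py could expose the three helpers through one combined interface; the generate_all wrapper, output layout, and title are assumptions for illustration, not code from this commit.

# Hypothetical wiring sketch (assumption, not part of the commit): run each helper
# from the diff above on the same prompt and show the three results side by side.
import gradio as gr

def generate_all(prompt1):
    return (
        generateBlogTopics(prompt1),
        generateBlogSections(prompt1),
        blogSectionExpander(prompt1),
    )

demo = gr.Interface(
    fn=generate_all,
    inputs=input_text,                 # the Textbox defined at the end of the hunk
    outputs=["text", "text", "text"],  # one text output per helper
    title="Blog writing assistant",    # assumed title
)
demo.launch()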