Spaces:
Sleeping
Sleeping
Upload 3 files
Browse files- app.py +47 -0
- requirements.txt +5 -0
- utils.py +84 -0
app.py
ADDED
@@ -0,0 +1,47 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
|
2 |
+
import gradio as gr
|
3 |
+
|
4 |
+
from utils import *
|
5 |
+
|
6 |
+
|
7 |
+
def generate_text(llm, api_key, email_features, example_feature_template, example_subj_body_template):
    """Generate a recruiting email (subject + body) from candidate/job features.

    Args:
        llm: model identifier chosen in the dropdown ("gpt-3.5-turbo" or
            "google_flan_t5_xxl"); falsy when nothing is selected.
        api_key: API key/token for the selected backend.
        email_features: feature text for the email to generate.
        example_feature_template: features of the one-shot example email.
        example_subj_body_template: subject/body of the one-shot example email.

    Returns:
        The generated email text, or a short error message when the model or
        API key is missing.
    """
    # Guard clauses: surface missing configuration in the output box
    # instead of crashing inside the chain.
    if not llm:
        return 'No Model Selected'
    if not api_key:
        return 'No API Key given'

    prompt = return_template(example_feature_template, example_subj_body_template)
    chain = llm_chain_func(prompt, llm, api_key)
    return chain.run(email_features)
|
17 |
+
|
18 |
+
|
19 |
+
# Gradio UI: inputs (model, key, email features, one-shot example) on the
# left, generated text on the right.
with gr.Blocks() as demo:
    with gr.Row():
        # Left column: model selection and prompt inputs, pre-filled with
        # the default templates from utils.
        with gr.Column():
            llm = gr.Dropdown(["gpt-3.5-turbo", "google_flan_t5_xxl"], type="value", multiselect=False)
            api_key = gr.Text(label="Enter API Key for the selected model")
            email_features = gr.TextArea(label="Input Email features", value=input_temp)
            example_feature_template = gr.TextArea(label="Example Email - features", value=example_input_feat_temp)
            example_subj_body_template = gr.TextArea(label="Example Email - subject and body", value=example_input_subj_body_temp)

        # Right column: generated output.
        with gr.Column():
            generated_output = gr.TextArea(label="Generated Text")

    btn = gr.Button("Generate")

    btn.click(generate_text,
              inputs=[llm, api_key, email_features, example_feature_template, example_subj_body_template],
              outputs=[generated_output])

if __name__ == "__main__":
    demo.launch(share=True)
|
requirements.txt
ADDED
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
|
2 |
+
langchain<0.1  # app/utils use pre-0.1 import paths (from langchain import PromptTemplate, OpenAI, ...)
|
3 |
+
openai
|
4 |
+
gradio
|
5 |
+
huggingface_hub
|
utils.py
ADDED
@@ -0,0 +1,84 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
|
2 |
+
from langchain import PromptTemplate, FewShotPromptTemplate
|
3 |
+
from langchain import HuggingFaceHub, LLMChain, OpenAI
|
4 |
+
import os
|
5 |
+
|
6 |
+
|
7 |
+
# Default feature text pre-filled into the "Input Email features" box in app.py;
# the live input the few-shot prompt asks the LLM to turn into an email.
input_temp = """Candidate Name - Hitesh
jobs in - Data Science
distance - 10 miles
pay range - $100,000 to $150,000 per year
job details - implementing analytics solution and generating insights for FMCG and Insurance markets
position available at - Bangalore, Mumbai, Atlanta, Peachtree City, and Woodstock in Georgia, India, US"""


# One-shot example: the "Input" half (job features) shown to the model in the
# few-shot prompt built by return_template().
example_input_feat_temp = """Candidate Name - Alex
jobs in - Netsuite ERP domain
distance - 5 miles
pay range - $180,000 to $200,000 per year
job details - implementing and upgrading the NetSuite system, understanding client requirements, and providing maintenance and support
position available at - Atlanta, Peachtree City, and Woodstock in Georgia, US"""


# One-shot example: the "Output" half (subject + body) matching the features
# above, demonstrating the desired email format.
example_input_subj_body_temp = """Subject:
Netsuite ERP jobs 5 miles from you - Earn upto $200,000/year!

Body:
Hi Alex!
We haven't connected in a while, and I would love to get back in touch to help you find your next assignment!
We have several exciting job opportunities available for you in the NetSuite ERP domain. These positions include NetSuite Financial Consultant, NetSuite ARM Consultant, NetSuite Manager, and NetSuite Supply Chain Consultant. You can earn between $180,000 to $200,000 per year for any of these jobs. Each role involves implementing and upgrading the NetSuite system, understanding client requirements, and providing maintenance and support. As a successful candidate, you will work with stakeholders to optimize business processes and provide customizations to the NetSuite system. We have positions available in Atlanta, Peachtree City, and Woodstock in Georgia, US."""
|
30 |
+
|
31 |
+
|
32 |
+
def return_template(ex_feat, ex_subj_body):
    """Build a one-shot FewShotPromptTemplate from an example email.

    Args:
        ex_feat: feature text of the example email (the "Input").
        ex_subj_body: subject and body of the example email (the "Output").

    Returns:
        A FewShotPromptTemplate expecting a single ``input`` variable.
    """
    # A single worked example: features in, subject/body out.
    examples = [{"Input": ex_feat, "Output": ex_subj_body}]

    example_formatter_template = """
Input: {Input}
Output: {Output}\n
"""

    example_prompt = PromptTemplate(
        template=example_formatter_template,
        input_variables=["Input", "Output"],
    )

    # Prefix carries the task instruction; suffix is where the live user
    # input is slotted in at run time.
    return FewShotPromptTemplate(
        examples=examples,
        example_prompt=example_prompt,
        prefix="Generate an email subject and body by taking following inputs-",
        suffix="Input: {input}\n\nOutput:",
        input_variables=["input"],
        example_separator="\n\n",
    )
|
65 |
+
|
66 |
+
|
67 |
+
def llm_dict_func(llm, api_key):
    """Instantiate the LLM backend selected in the UI.

    Args:
        llm: model identifier, one of "gpt-3.5-turbo" or "google_flan_t5_xxl".
        api_key: OpenAI API key or Hugging Face Hub token, depending on ``llm``.

    Returns:
        A langchain LLM instance for the requested backend.

    Raises:
        ValueError: if ``llm`` is not a supported model name.
    """
    if llm == "gpt-3.5-turbo":
        return OpenAI(model_name="gpt-3.5-turbo",
                      openai_api_key=api_key)

    if llm == "google_flan_t5_xxl":
        # Near-zero temperature for (mostly) deterministic output; cap the
        # generation length at 500 new tokens.
        return HuggingFaceHub(repo_id="google/flan-t5-xxl",
                              huggingfacehub_api_token=api_key,
                              model_kwargs={"temperature": 0.001, "max_new_tokens": 500})

    # Previously an unknown name fell through and returned None, which later
    # crashed inside LLMChain with a confusing error; fail fast instead.
    raise ValueError(f"Unsupported model: {llm!r}")
|
77 |
+
|
78 |
+
|
79 |
+
def llm_chain_func(fs_prompt, llm, api_key):
    """Wire the few-shot prompt and the selected LLM backend into an LLMChain.

    Args:
        fs_prompt: FewShotPromptTemplate produced by return_template().
        llm: model identifier passed through to llm_dict_func().
        api_key: API key/token passed through to llm_dict_func().

    Returns:
        An LLMChain ready to run on the email features.
    """
    backend = llm_dict_func(llm, api_key)
    return LLMChain(prompt=fs_prompt, llm=backend)
|