import os

import whisper
from transformers import pipeline
from langchain.llms import AzureOpenAI
from langchain.prompts import PromptTemplate, HumanMessagePromptTemplate
from langchain.prompts.chat import ChatPromptTemplate
from langchain.schema.messages import SystemMessage
# Azure OpenAI configuration. The key below is a placeholder; supply the real
# value through the Space's secrets rather than committing it to the repo.
os.environ["OPENAI_API_TYPE"] = "azure"
os.environ["OPENAI_API_VERSION"] = "2022-12-01"
os.environ["OPENAI_API_BASE"] = "https://pubsec-careerpathways-openai.openai.azure.com/"
os.environ["OPENAI_API_KEY"] = "<your-azure-openai-api-key>"  # redacted
def get_llm():
    """Return an AzureOpenAI completion LLM bound to the Space's deployment."""
    llm = AzureOpenAI(
        deployment_name="CareerPathwaysPOC",
        model_name="gpt-35-turbo",
        temperature=0,      # deterministic output for summarization
        max_tokens=2000,
    )
    return llm
def get_audio_text(file):
    # Load a Whisper checkpoint; "base" trades accuracy for speed.
    model = whisper.load_model("base")
    # Transcribe the audio file to text.
    result = model.transcribe(file)
    print(result["text"])
    return result["text"]
    # Disabled: email the transcript summary through a Zapier-backed agent.
    # agent.run("Send an Email to [email protected] via gmail summarizing the following text provided below : " + result["text"])
def get_audio_senti(file):
    # Classify the speaker's emotion with a wav2vec2 model fine-tuned on MESD.
    pipe = pipeline(
        "audio-classification",
        model="hackathon-pln-es/wav2vec2-base-finetuned-sentiment-classification-MESD",
    )
    return pipe(file)
def get_summary_using_chatprompt(user_input):
    # Chat-style prompt: a system message carrying the summarization rules,
    # plus the raw conversation as the human message. Note the trailing spaces
    # inside the string literals: adjacent literals concatenate with no
    # separator, so without them the instructions would run together.
    chat_template = ChatPromptTemplate.from_messages(
        [
            SystemMessage(
                content=(
                    "You are a helpful assistant that writes a summary of the conversation "
                    "between a user and the Education call center in bullet points. "
                    "Extract all relevant information from the text and check whether the "
                    "agent violated any rules during the call. "
                    "The summary should include key details such as the user's inquiry, the "
                    "agent's responses, any requests made by the user, and any potential "
                    "violations of call center rules or policies. Additionally, highlight "
                    "any specific actions or follow-up steps discussed during the call."
                )
            ),
            HumanMessagePromptTemplate.from_template("{text}"),
        ]
    )
    # llm = ChatOpenAI(openai_api_key=openai_api_key)
    llm = get_llm()
    # AzureOpenAI is a completion model, so render the chat messages to a
    # single string instead of passing a message list.
    summary = llm(chat_template.format(text=user_input))
    return summary
def get_summary_using_prompt(user_input):
    llm = get_llm()
    # Plain completion prompt: the conversation followed by a "Summary:" cue.
    template = (
        "Please provide the summary of the below conversation between the user "
        "and the agent.\n{user_conversation}\n\nSummary:\n"
    )
    prompt = PromptTemplate(
        input_variables=["user_conversation"],
        template=template,
    )
    summary_prompt = prompt.format(user_conversation=user_input)
    print(summary_prompt)
    summary = llm(summary_prompt)
    return summary
def get_summary_response(uses_conversion, userqueries):
    llm = get_llm()
    role = (
        "You are a conversation analyst responsible for examining and assessing "
        "interactions between users and agents."
    )
    instructions = (
        "Provide responses to the specific questions posed within the given conversation.\n"
        "Refrain from answering if the answer is not explicitly present in the provided text.\n"
        "Avoid offering explanations in your responses."
    )
    conversation_history = ""
    # ChatML-style prompt. PromptTemplate substitutes bare {var} placeholders,
    # so the original "${var}" forms would have left stray "$" characters, and
    # the opening token was missing its "<".
    template = (
        "<|im_start|>system\n{role}\nInstructions\n{instructions}\n"
        "conversation\n{templateMessage}\n<|im_end|>\n{conversation_history}\n"
        "<|im_start|>user\n{query}\n<|im_end|>\n"
    )
    # Alternative plain prompt:
    # template = ("Please answer this question: {userqueries}? Do not provide an "
    #             "explanation. If the answer is not in the text below, do not "
    #             "answer.\n\n{uses_conversion}\n\n")
    prompt = PromptTemplate(
        input_variables=["role", "instructions", "templateMessage", "conversation_history", "query"],
        template=template,
    )
    summary_prompt = prompt.format(
        templateMessage=uses_conversion,
        query=userqueries,
        role=role,
        instructions=instructions,
        conversation_history=conversation_history,
    )
    print(summary_prompt)
    summary = llm(summary_prompt)
    # Strip the ChatML end-of-message token from the completion.
    summary = summary.replace('<|im_end|>', '')
    print(summary)
    return summary
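
# Minimal usage sketch, not part of the original app wiring: it shows how the
# functions above chain together. The audio filename and the sample question
# are illustrative assumptions, not files or inputs shipped with this Space.
if __name__ == "__main__":
    audio_path = "sample_call.wav"                      # hypothetical local file
    transcript = get_audio_text(audio_path)             # speech-to-text via Whisper
    print(get_audio_senti(audio_path))                  # audio emotion scores
    print(get_summary_using_prompt(transcript))         # plain-prompt summary
    print(get_summary_using_chatprompt(transcript))     # chat-prompt summary
    print(get_summary_response(transcript, "What did the user ask about?"))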