File size: 1,516 Bytes
4eaf3da
0a5759a
4eaf3da
 
 
 
 
 
 
 
0a5759a
 
 
 
4eaf3da
 
 
 
 
 
0a5759a
4eaf3da
 
0a5759a
 
4eaf3da
 
0a5759a
4eaf3da
 
 
 
 
 
 
0a5759a
4eaf3da
0a5759a
4eaf3da
 
 
 
 
 
0a5759a
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
from langchain.prompts import PromptTemplate
from langchain import HuggingFaceHub
from langchain.chains import LLMChain
from langchain.memory import ConversationBufferMemory
from redis.commands.search.query import Query
import time
import os
from dotenv import load_dotenv
import numpy as np
# Pull configuration from a local .env file and read the Hub API token.
load_dotenv()
HUGGINGFACEHUB_API_TOKEN = os.getenv('HUGGINGFACEHUB_API_TOKEN')

# Instruction-tuned Falcon model used to turn a free-form product
# description into search keywords; low temperature keeps output focused.
repo_id = 'tiiuae/falcon-7b-instruct'

falcon_llm_1 = HuggingFaceHub(
    repo_id=repo_id,
    model_kwargs={'temperature': 0.1, 'max_new_tokens': 500},
    huggingfacehub_api_token=HUGGINGFACEHUB_API_TOKEN,
)

# Prompt asking the model for comma-separated keywords suitable for
# querying the Amazon product dataset.
prompt = PromptTemplate(
    input_variables=["product_description"],
    template="Create comma seperated product keywords to perform a query on a amazon dataset for this user input: {product_description}",
)

# Keyword-extraction chain: product description in, keyword string out.
chain = LLMChain(llm=falcon_llm_1, prompt=prompt)

# --- Response-presentation chain -------------------------------------------
# Base (non-instruct) Falcon model that rephrases the queried search
# results as a salesman-style chat answer; higher temperature allows a
# more conversational tone. NOTE(review): no explicit API token is passed
# here — presumably HuggingFaceHub falls back to the env var; confirm.
repo_id_2 = 'tiiuae/falcon-7b'

template = """You are a salesman. Be kind, detailed and nice.  take the given context and Present the given queried search result in a nice way as answer to the user_msg. dont ask questions back or freestyle and invent followup conversation! just 

{chat_history}
{user_msg}
Chatbot:"""

# Chat prompt: prior conversation plus the latest user message.
prompt = PromptTemplate(
    input_variables=["chat_history", "user_msg"],
    template=template,
)

# Buffer memory so the chatbot can refer back to earlier turns; its key
# must match the {chat_history} placeholder in the template above.
memory = ConversationBufferMemory(memory_key="chat_history")

llm_chain = LLMChain(
    llm=HuggingFaceHub(
        repo_id=repo_id_2,
        model_kwargs={'temperature': 0.8, 'max_new_tokens': 500},
    ),
    prompt=prompt,
    verbose=False,
    memory=memory,
)