Commit b482a9c (verified), committed by leandrocarneiro
1 Parent(s): 566bba1

Update rag.py

Files changed (1): rag.py (+2 -2)
rag.py CHANGED
@@ -59,7 +59,7 @@ class Rag:
 
         prompt_template = """Your task is to create news to a newspaper based on pieces of texts delimited by <> and a question delimited by <>.
                         Do not make up any information, create the news just based on the given information on the pieces of texts delimited by <>.
-                        If the information is not enough to create the news, you can use your knowledge to complete the news.
+                        If the information is not enough to create the news, just say that you need more texts to create the news.
                         The news should have a tittle.
                         The news should be written in a formal language.
                         The news should have between {min_words} and {max_words} words and it should be in portuguese language.
@@ -71,7 +71,7 @@ class Rag:
                                         partial_variables={"min_words": min_words, "max_words": max_words})
 
         self.qa = ConversationalRetrievalChain.from_llm(
-            llm=ChatOpenAI(model_name="gpt-3.5-turbo",
+            llm=ChatOpenAI(model_name="gpt-3.5-turbo-1106",
                            temperature=1,
                            openai_api_key=os.environ['OPENAI_KEY'],
                            max_tokens=int(int(max_words) + (int(max_words) / 2))), #número máximo de tokens para a resposta
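
For context, only two lines change in this commit: the fallback instruction in the prompt and the pinned model name. The sketch below shows how those lines typically sit inside a LangChain ConversationalRetrievalChain setup; the function name, the retriever, the template's closing lines, and the input_variables / combine_docs_chain_kwargs wiring are assumptions based on common LangChain usage, not taken from rag.py.

# Hedged sketch of the surrounding setup; only the prompt wording and model_name
# come from this commit. Retriever, input variables, and prompt wiring are assumed.
import os

from langchain.chains import ConversationalRetrievalChain
from langchain.chat_models import ChatOpenAI
from langchain.prompts import PromptTemplate


def build_news_chain(vectorstore, min_words, max_words):
    # Illustrative, shortened template: the commit only rewrites the
    # "If the information is not enough..." instruction.
    prompt = PromptTemplate(
        template=(
            "Your task is to create news to a newspaper based on pieces of texts "
            "delimited by <> and a question delimited by <>.\n"
            "If the information is not enough to create the news, just say that you "
            "need more texts to create the news.\n"
            "The news should have between {min_words} and {max_words} words.\n"
            "Pieces of texts: <{context}>\nQuestion: <{question}>"  # assumed closing lines
        ),
        input_variables=["context", "question"],  # assumed; not visible in the hunks
        partial_variables={"min_words": min_words, "max_words": max_words},
    )

    return ConversationalRetrievalChain.from_llm(
        llm=ChatOpenAI(
            model_name="gpt-3.5-turbo-1106",  # dated snapshot pinned by this commit
            temperature=1,
            openai_api_key=os.environ["OPENAI_KEY"],
            # cap the answer length: max_words plus a 50% margin, counted in tokens
            max_tokens=int(int(max_words) + (int(max_words) / 2)),
        ),
        retriever=vectorstore.as_retriever(),          # assumed retriever source
        combine_docs_chain_kwargs={"prompt": prompt},  # assumed prompt injection point
    )

Pinning the dated snapshot "gpt-3.5-turbo-1106" instead of the floating "gpt-3.5-turbo" alias keeps the chain's behaviour stable when OpenAI repoints the alias, which matters here because the prompt now forbids the model from filling gaps with its own knowledge.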