Spaces:
Running
Running
luanpoppe
committed on
Commit
·
4b3b841
1
Parent(s):
85ee925
fix: prompt do gerar ementa
Browse files
_utils/gerar_documento_utils/utils.py
CHANGED
@@ -88,7 +88,7 @@ async def get_response_from_auxiliar_contextual_prompt(full_text_as_array: List[
|
|
88 |
|
89 |
def split_text_by_tokens(full_text: str):
|
90 |
tokens = encoding.encode(full_text)
|
91 |
-
max_tokens =
|
92 |
|
93 |
# Divide os tokens em partes de no máximo max_tokens
|
94 |
token_chunks = [
|
|
|
88 |
|
89 |
def split_text_by_tokens(full_text: str):
|
90 |
tokens = encoding.encode(full_text)
|
91 |
+
max_tokens = 600000
|
92 |
|
93 |
# Divide os tokens em partes de no máximo max_tokens
|
94 |
token_chunks = [
|
gerar_documento/views.py
CHANGED
@@ -164,7 +164,8 @@ class GerarEmentaView(AsyncAPIView):
|
|
164 |
text_splitted_by_tokens = split_text_by_tokens(full_text)
|
165 |
for text in text_splitted_by_tokens:
|
166 |
prompt_template = PromptTemplate(
|
167 |
-
input_variables=["context"],
|
|
|
168 |
)
|
169 |
texto_da_parte = await gerar_documento_instance.checar_se_resposta_vazia_do_documento_final(
|
170 |
obj.llm_ultimas_requests,
|
@@ -267,7 +268,8 @@ class GerarEmentaComPDFProprioView(AsyncAPIView):
|
|
267 |
text_splitted_by_tokens = split_text_by_tokens(full_text)
|
268 |
for text in text_splitted_by_tokens:
|
269 |
prompt_template = PromptTemplate(
|
270 |
-
input_variables=["context"],
|
|
|
271 |
)
|
272 |
texto_da_parte = await gerar_documento_instance.checar_se_resposta_vazia_do_documento_final(
|
273 |
serializer_obj.llm_ultimas_requests,
|
|
|
164 |
text_splitted_by_tokens = split_text_by_tokens(full_text)
|
165 |
for text in text_splitted_by_tokens:
|
166 |
prompt_template = PromptTemplate(
|
167 |
+
input_variables=["context"],
|
168 |
+
template=obj.prompt_gerar_documento,
|
169 |
)
|
170 |
texto_da_parte = await gerar_documento_instance.checar_se_resposta_vazia_do_documento_final(
|
171 |
obj.llm_ultimas_requests,
|
|
|
268 |
text_splitted_by_tokens = split_text_by_tokens(full_text)
|
269 |
for text in text_splitted_by_tokens:
|
270 |
prompt_template = PromptTemplate(
|
271 |
+
input_variables=["context"],
|
272 |
+
template=serializer_obj.prompt_gerar_documento,
|
273 |
)
|
274 |
texto_da_parte = await gerar_documento_instance.checar_se_resposta_vazia_do_documento_final(
|
275 |
serializer_obj.llm_ultimas_requests,
|