luanpoppe committed
Commit 756fca0 · 1 Parent(s): d32424b

feat: refactoring serializers and renaming some functions

_utils/custom_exception_handler.py CHANGED
@@ -2,17 +2,18 @@
 
 from datetime import datetime
 import pytz
-from typing import Dict
+from typing import Any, Dict, Union
 from rest_framework.views import exception_handler
 import logging
 from _utils.bubble_integrations.enviar_resposta_final import enviar_resposta_final
+from gerar_documento.serializer import GerarDocumentoSerializerData
 
 logger = logging.getLogger(__name__)
 
 
 def custom_exception_handler(exc, context):
     print("---------------- CHEGOU NA FUNÇÃO PERSONALIZADA DE ERRO ----------------")
-    if (context):
+    if context:
         serializer: Dict = context["view"].serializer
     else:
         serializer = {}
@@ -32,19 +33,21 @@ def custom_exception_handler(exc, context):
     if response and str(response.status_code)[0] != "2":
         logger.error(f"Validation error: {response.data}")
 
-
     return response
 
-def custom_exception_handler_wihout_api_handler(error, serializer: Dict):
+
+def custom_exception_handler_wihout_api_handler(
+    error, serializer: Union[GerarDocumentoSerializerData, Any]
+):
     bahia_tz = pytz.timezone("America/Bahia")
     print("INICIANDO RESPOSTA DE ERRO PARA O BUBBLE")
     resposta_bubble = enviar_resposta_final(
-        serializer.get("doc_id", ""),
-        serializer.get("form_response_id", ""),
-        serializer.get("version", ""),
-        f"------------ ERRO NO BACKEND ÀS {datetime.now(bahia_tz).strftime("%d/%m/%Y - %H:%M:%S")} ------------:\nMensagem de erro: {error} ",  # serializer.get("texto_completo", ""),
+        serializer.doc_id,
+        serializer.form_response_id,
+        serializer.version,
+        f"------------ ERRO NO BACKEND ÀS {datetime.now(bahia_tz).strftime("%d/%m/%Y - %H:%M:%S")} ------------:\nMensagem de erro: {error} ",  # serializer.get("texto_completo", ""),
         True,
     )
     print("\n\nresposta_bubble.status_code", resposta_bubble.status_code)
     print("\n\nresposta_bubble.text", resposta_bubble.text)
-    print("\n------------ MOTIVO DO ERRO -----------:", f"\n{error}")
+    print("\n------------ MOTIVO DO ERRO -----------:", f"\n{error}")
_utils/{resumo_completo_cursor.py → gerar_documento.py} RENAMED
@@ -1,6 +1,6 @@
 import os
 from langchain_core.messages import HumanMessage
-from typing import cast
+from typing import Any, Union, cast
 from _utils.langchain_utils.LLM_class import LLM
 from _utils.bubble_integrations.enviar_resposta_final import enviar_resposta_final
 from _utils.custom_exception_handler import custom_exception_handler_wihout_api_handler
@@ -26,6 +26,7 @@ import markdown
 
 from _utils.langchain_utils.Prompt_class import Prompt
 from _utils.utils import convert_markdown_to_HTML
+from gerar_documento.serializer import GerarDocumentoSerializerData
 
 
 def reciprocal_rank_fusion(result_lists, weights=None):
@@ -53,36 +54,36 @@ os.environ.get("LANGCHAIN_API_KEY")
 os.environ["LANGCHAIN_PROJECT"] = "VELLA"
 
 
-async def get_llm_summary_answer_by_cursor_complete(
-    serializer, listaPDFs, isBubble=False
+async def gerar_documento(
+    serializer: Union[GerarDocumentoSerializerData, Any], listaPDFs, isBubble=False
 ):
     """Parâmetro "contexto" só deve ser passado quando quiser utilizar o teste com ragas, e assim, não quiser passar PDFs"""
     try:
         # Configuration
         config = RetrievalConfig(
-            num_chunks=serializer["num_chunks_retrieval"],
-            embedding_weight=serializer["embedding_weight"],
-            bm25_weight=serializer["bm25_weight"],
-            context_window=serializer["context_window"],
-            chunk_overlap=serializer["chunk_overlap"],
+            num_chunks=serializer.num_chunks_retrieval,
+            embedding_weight=serializer.embedding_weight,
+            bm25_weight=serializer.bm25_weight,
+            context_window=serializer.context_window,
+            chunk_overlap=serializer.chunk_overlap,
         )
 
         contextual_retriever = ContextualRetriever(
-            config, serializer["claude_context_model"]
+            config, serializer.claude_context_model
         )
 
         # Initialize enhanced summarizer
         summarizer = GerarDocumento(
             config=config,
-            embedding_model=serializer["hf_embedding"],
-            chunk_overlap=serializer["chunk_overlap"],
-            chunk_size=serializer["chunk_size"],
-            num_k_rerank=serializer["num_k_rerank"],
-            model_cohere_rerank=serializer["model_cohere_rerank"],
-            # prompt_auxiliar=serializer["prompt_auxiliar"],
-            gpt_model=serializer["model"],
-            gpt_temperature=serializer["gpt_temperature"],
-            prompt_gerar_documento=serializer["prompt_gerar_documento"],
+            embedding_model=serializer.hf_embedding,
+            chunk_overlap=serializer.chunk_overlap,
+            chunk_size=serializer.chunk_size,
+            num_k_rerank=serializer.num_k_rerank,
+            model_cohere_rerank=serializer.model_cohere_rerank,
+            # prompt_auxiliar=serializer.prompt_auxiliar,
+            gpt_model=serializer.model,
+            gpt_temperature=serializer.gpt_temperature,
+            prompt_gerar_documento=serializer.prompt_gerar_documento,
             reciprocal_rank_fusion=reciprocal_rank_fusion,
         )
 
@@ -90,12 +91,12 @@ async def get_llm_summary_answer_by_cursor_complete(
             await get_full_text_and_all_PDFs_chunks(
                 listaPDFs,
                 summarizer.splitter,
-                serializer["should_use_llama_parse"],
+                serializer.should_use_llama_parse,
                 isBubble,
             )
         )
 
-        is_contextualized_chunk = serializer["should_have_contextual_chunks"]
+        is_contextualized_chunk = serializer.should_have_contextual_chunks
 
         if is_contextualized_chunk:
             response_auxiliar_summary = (
@@ -133,7 +134,7 @@ async def get_llm_summary_answer_by_cursor_complete(
             )
         )
 
-        llm_ultimas_requests = serializer["llm_ultimas_requests"]
+        llm_ultimas_requests = serializer.llm_ultimas_requests
         print("\nCOMEÇANDO A FAZER ÚLTIMA REQUISIÇÃO")
         structured_summaries = await summarizer.gerar_documento_final(
             vector_store,
@@ -172,9 +173,9 @@ async def get_llm_summary_answer_by_cursor_complete(
         if isBubble:
            print("COMEÇANDO A REQUISIÇÃO FINAL PARA O BUBBLE")
            enviar_resposta_final(
-                serializer["doc_id"],
-                serializer["form_response_id"],
-                serializer["version"],
+                serializer.doc_id,
+                serializer.form_response_id,
+                serializer.version,
                 texto_completo_como_html,
                 False,
                 cast(str, titulo_do_documento),
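
The rename also changes the calling convention from dict lookups on validated_data to attribute access on the dataclass. A hedged sketch of a direct call under the new signature (the field values below are illustrative, not from the commit; omitted fields fall back to the dataclass defaults):

    import asyncio
    from _utils.gerar_documento import gerar_documento
    from gerar_documento.serializer import GerarDocumentoSerializerData

    # Build the typed object directly instead of indexing validated_data
    obj = GerarDocumentoSerializerData(files=[])
    asyncio.run(gerar_documento(obj, listaPDFs=[], isBubble=False))
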
_utils/gerar_relatorio_modelo_usuario/utils.py CHANGED
@@ -1,4 +1,4 @@
-from typing import List, Tuple
+from typing import Any, List, Tuple, Union
 from langchain_core.documents import Document
 from langchain_core.messages import HumanMessage
 
@@ -10,28 +10,28 @@ from _utils.gerar_relatorio_modelo_usuario.prompts import (
 )
 import re
 
+from gerar_documento.serializer import GerarDocumentoSerializerData
 
-def gerar_resposta_compilada(serializer):
+
+def gerar_resposta_compilada(serializer: Union[GerarDocumentoSerializerData, Any]):
     return {
-        "num_chunks_retrieval": serializer["num_chunks_retrieval"],
-        "embedding_weight": serializer["embedding_weight"],
-        "bm25_weight": serializer["bm25_weight"],
-        "context_window": serializer["context_window"],
-        "chunk_overlap": serializer["chunk_overlap"],
-        "num_k_rerank": serializer["num_k_rerank"],
-        "model_cohere_rerank": serializer["model_cohere_rerank"],
-        "more_initial_chunks_for_reranking": serializer[
-            "more_initial_chunks_for_reranking"
-        ],
-        "claude_context_model": serializer["claude_context_model"],
-        "gpt_temperature": serializer["gpt_temperature"],
-        "user_message": serializer["user_message"],
-        "model": serializer["model"],
-        "hf_embedding": serializer["hf_embedding"],
-        "chunk_size": serializer["chunk_size"],
-        "chunk_overlap": serializer["chunk_overlap"],
-        # "prompt_auxiliar": serializer["prompt_auxiliar"],
-        "prompt_gerar_documento": serializer["prompt_gerar_documento"][0:200],
+        "num_chunks_retrieval": serializer.num_chunks_retrieval,
+        "embedding_weight": serializer.embedding_weight,
+        "bm25_weight": serializer.bm25_weight,
+        "context_window": serializer.context_window,
+        "chunk_overlap": serializer.chunk_overlap,
+        "num_k_rerank": serializer.num_k_rerank,
+        "model_cohere_rerank": serializer.model_cohere_rerank,
+        "more_initial_chunks_for_reranking": serializer.more_initial_chunks_for_reranking,
+        "claude_context_model": serializer.claude_context_model,
+        "gpt_temperature": serializer.gpt_temperature,
+        "user_message": serializer.user_message,
+        "model": serializer.model,
+        "hf_embedding": serializer.hf_embedding,
+        "chunk_size": serializer.chunk_size,
+        "chunk_overlap": serializer.chunk_overlap,
+        # "prompt_auxiliar": serializer.prompt_auxiliar,
+        "prompt_gerar_documento": serializer.prompt_gerar_documento[0:200],
     }
 
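Note that "chunk_overlap" appears twice in the returned dict literal; the later occurrence wins, harmlessly, since both read the same attribute. Since the input is now a dataclass, most of this hand-written mapping could in principle come from dataclasses.asdict; a hedged alternative sketch, not code from the commit (it would also carry fields such as files that the dict above omits):

    from dataclasses import asdict

    def gerar_resposta_compilada_sketch(serializer):
        data = asdict(serializer)
        # only the prompt needs special handling: truncate to 200 characters
        data["prompt_gerar_documento"] = data["prompt_gerar_documento"][:200]
        return data
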
_utils/langchain_utils/embeddings.py CHANGED
@@ -1,5 +1,3 @@
-import os
-
 from pydantic import Secret
 from setup.easy_imports import OpenAIEmbeddings
 from setup.tokens import openai_api_key
_utils/ragas.py CHANGED
@@ -1,6 +1,6 @@
 import os
 from langchain_community.document_loaders import PyPDFLoader
-from _utils.resumo_completo_cursor import GerarDocumento, RetrievalConfig
+from _utils.gerar_documento import GerarDocumento, RetrievalConfig
 from rest_framework.response import Response
 from ragas import evaluate
 
gerar_documento/serializer.py CHANGED
@@ -1,9 +1,12 @@
+from dataclasses import dataclass, field
+from typing import List, Optional
 from rest_framework import serializers
 from _utils.gerar_relatorio_modelo_usuario.prompts import (
     prompt_gerar_documento,
     prompt_auxiliar_padrao,
 )
 from setup.environment import default_model
+from django.core.files.uploadedfile import UploadedFile
 
 user_message = "What are the main points of this document?"
 
@@ -29,12 +32,30 @@ class GerarDocumentoInitialSerializer(serializers.Serializer):
     chunk_overlap = serializers.IntegerField(required=False, default=800)
 
 
+@dataclass
+class GerarDocumentoInitialSerializerData:
+    files: List[dict]
+    system_prompt: str = prompt_template
+    user_message: str = ""
+    model: str = default_model
+    hf_embedding: str = "all-MiniLM-L6-v2"
+    chunk_size: int = 3500
+    chunk_overlap: int = 800
+
+
 class FileInfoSerializer(serializers.Serializer):
     unique_id = serializers.CharField(max_length=255)
     tipo_arquivo = serializers.CharField(max_length=255)
     link_arquivo = serializers.URLField()
 
 
+@dataclass
+class FileInfoSerializerData:
+    unique_id: str
+    tipo_arquivo: str
+    link_arquivo: str
+
+
 class GerarDocumentoSerializer(GerarDocumentoInitialSerializer):
     system_prompt = None
 
@@ -74,6 +95,34 @@ class GerarDocumentoSerializer(GerarDocumentoInitialSerializer):
     form_response_id = serializers.CharField(required=True)
     version = serializers.CharField(required=True)
 
+    def get_obj(self):
+        return GerarDocumentoSerializerData(**self.validated_data)  # type: ignore
+
+
+@dataclass
+class GerarDocumentoSerializerData(GerarDocumentoInitialSerializerData):
+    files: List[FileInfoSerializerData]
+    bubble_editor_version: str = "version-test"
+    prompt_gerar_documento: str = ""
+    user_message: str = ""
+    num_chunks_retrieval: int = 20
+    embedding_weight: float = 0.5
+    bm25_weight: float = 0.5
+    context_window: int = 3
+    chunk_overlap: int = 800
+    num_k_rerank: int = 20
+    model_cohere_rerank: str = "rerank-english-v2.0"
+    more_initial_chunks_for_reranking: int = 100
+    claude_context_model: str = "claude-3-haiku-20240307"
+    gpt_temperature: float = 0.0
+    id_modelo_do_usuario: Optional[int] = None
+    should_have_contextual_chunks: bool = False
+    should_use_llama_parse: bool = False
+    llm_ultimas_requests: str = "gemini-2.0-flash"
+    doc_id: str = ""
+    form_response_id: str = ""
+    version: str = ""
+
 
 class GerarDocumentoComPDFProprioSerializer(GerarDocumentoInitialSerializer):
     system_prompt = None
@@ -103,6 +152,29 @@ class GerarDocumentoComPDFProprioSerializer(GerarDocumentoInitialSerializer):
     should_use_llama_parse = serializers.BooleanField(required=False, default=False)  # type: ignore
     llm_ultimas_requests = serializers.CharField(required=False, default="gpt-4o-mini")
 
+    def get_obj(self):
+        return GerarDocumentoSerializerData(**self.validated_data)  # type: ignore
+
+
+@dataclass
+class GerarDocumentoComPDFProprioData(GerarDocumentoInitialSerializerData):
+    prompt_gerar_documento: Optional[str] = field(default=None)
+    user_message: Optional[str] = field(default=None)
+    num_chunks_retrieval: int = field(default=20)
+    embedding_weight: float = field(default=0.5)
+    bm25_weight: float = field(default=0.5)
+    context_window: int = field(default=3)
+    chunk_overlap: int = field(default=800)
+    num_k_rerank: int = field(default=20)
+    model_cohere_rerank: str = field(default="rerank-english-v2.0")
+    more_initial_chunks_for_reranking: int = field(default=100)
+    claude_context_model: str = field(default="claude-3-haiku-20240307")
+    gpt_temperature: float = field(default=0.0)
+    id_modelo_do_usuario: int = field(default=11)
+    should_have_contextual_chunks: bool = field(default=False)
+    should_use_llama_parse: bool = field(default=False)
+    llm_ultimas_requests: str = field(default="gpt-4o-mini")
+
 
 class GerarEmentaSerializer(serializers.Serializer):
     files = serializers.ListField(child=FileInfoSerializer(), required=True)
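
The new get_obj() methods are the bridge between DRF validation and the typed dataclasses; note that both serializers build a GerarDocumentoSerializerData, while GerarDocumentoComPDFProprioData is defined but not referenced here. How a call site consumes the bridge (this mirrors gerar_documento/views.py below):

    serializer = GerarDocumentoSerializer(data=request.data)
    if serializer.is_valid(raise_exception=True):
        obj = serializer.get_obj()  # GerarDocumentoSerializerData
        # attribute access replaces the old validated_data["..."] lookups
        print(obj.doc_id, obj.num_chunks_retrieval)
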
gerar_documento/views.py CHANGED
@@ -14,8 +14,8 @@ from setup.easy_imports import (
 )
 from datetime import datetime
 from _utils.handle_files import handle_pdf_files_from_serializer, remove_pdf_temp_files
-from _utils.resumo_completo_cursor import (
-    get_llm_summary_answer_by_cursor_complete,
+from _utils.gerar_documento import (
+    gerar_documento,
 )
 from _utils.gerar_relatorio_modelo_usuario.prompts import prompt_auxiliar_inicio
 from .serializer import (
@@ -39,6 +39,7 @@ class GerarDocumentoView(AsyncAPIView):
         print("request.data: ", request.data)
         serializer = GerarDocumentoSerializer(data=request.data)
         if serializer.is_valid(raise_exception=True):
+            obj = serializer.get_obj()  # type: ignore
             if not serializer.validated_data:
                 raise ValueError("Erro no validated_data")
 
@@ -48,24 +49,15 @@ class GerarDocumentoView(AsyncAPIView):
             print("\ndata: ", data)
             self.serializer = data
 
-            # data["prompt_auxiliar"] = (
-            #     prompt_auxiliar_inicio + "\n" + data["prompt_auxiliar"]
-            # )
-
-            # listaPDFs = handle_pdf_files_from_serializer(data["files"])
             listaPDFs = [l["link_arquivo"] for l in data["files"]]
 
             print("\n\nlistaPDFs: ", listaPDFs)
 
-            resposta_llm = await get_llm_summary_answer_by_cursor_complete(
-                data, listaPDFs, True
-            )
+            resposta_llm = await gerar_documento(obj, listaPDFs, True)
             print("\n\nresposta_llm: ", resposta_llm)
 
             # remove_pdf_temp_files(listaPDFs)
 
-            # print("PRÓXIMA LINHA ENVIA A RESPOSTA A QUEM FEZ A REQUISIÇÃO")
-
             # asyncio.create_task(proccess_data_after_response())
             loop = asyncio.get_running_loop()
             loop.run_in_executor(
@@ -89,14 +81,13 @@ class GerarDocumentoComPDFProprioView(AsyncAPIView):
         serializer = GerarDocumentoComPDFProprioSerializer(data=request.data)
         if serializer.is_valid(raise_exception=True):
             data = cast(Dict[str, Any], serializer.validated_data)
+            obj = serializer.get_obj()  # type: ignore
             print("\n\ndata: ", data)
             self.serializer = data
 
             listaPDFs = handle_pdf_files_from_serializer(data["files"])
 
-            resposta_llm = await get_llm_summary_answer_by_cursor_complete(
-                data, listaPDFs
-            )
+            resposta_llm = await gerar_documento(obj, listaPDFs)
             print("\n\nresposta_llm: ", resposta_llm)
 
             remove_pdf_temp_files(listaPDFs)
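
One subtlety in the new flow: GerarDocumentoSerializerData(**validated_data) does not coerce nested values, so obj.files still holds the validated dicts rather than FileInfoSerializerData instances, and the dict-style access above keeps working either way. A hedged sketch of the equivalent link extraction via the dataclass:

    obj = serializer.get_obj()
    # same values as data["files"]; entries remain dicts despite the
    # List[FileInfoSerializerData] annotation
    listaPDFs = [f["link_arquivo"] for f in obj.files]
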
ragas_api/views.py CHANGED
@@ -3,8 +3,8 @@ import tempfile, os
 from rest_framework.response import Response
 
 from _utils.ragas import test_ragas
-from _utils.resumo_completo_cursor import (
-    get_llm_summary_answer_by_cursor_complete,
+from _utils.gerar_documento import (
+    gerar_documento,
 )
 from .serializer import (
     RagasFromTextSerializer,
@@ -96,7 +96,7 @@
 
         for i in range(len(reference)):
             serializer.validated_data["user_message"] = data["user_input"][i]
-            resposta_llm = get_llm_summary_answer_by_cursor_complete(
+            resposta_llm = gerar_documento(
                 serializer.validated_data, contexto=reference[i]
             )
             data["response"].append(resposta_llm["texto_completo"])