gabykim committed
Commit f4fd6ee · 1 Parent(s): c21dda6

LLM batch processing draft
src/know_lang_bot/models/batch_request.py ADDED
@@ -0,0 +1,41 @@
+from typing import List
+from know_lang_bot.config import ModelProvider, LLMConfig
+from pydantic_ai.messages import ModelMessage
+
+def _process_anthropic_batch(batched_input: List[List[ModelMessage]], config: LLMConfig) -> List[str]:
+    """Helper function to process Anthropic LLM requests in batch."""
+    import anthropic
+    from anthropic.types.message_create_params import MessageCreateParamsNonStreaming
+    from anthropic.types.messages.batch_create_params import Request
+    from pydantic_ai.models.anthropic import AnthropicAgentModel
+
+    client = anthropic.Anthropic()
+    requests: List[Request] = []
+    for idx, batch in enumerate(batched_input):
+        system_prompt, anthropic_prompt = AnthropicAgentModel._map_message(batch)
+        requests.append(
+            Request(
+                # custom_id must be unique within a batch so results can be matched back to inputs
+                custom_id=f"request-{idx}",
+                params=MessageCreateParamsNonStreaming(
+                    model=config.model_name,
+                    max_tokens=1024,
+                    system=system_prompt,
+                    messages=anthropic_prompt,
+                )
+            )
+        )
+
+    # Batch creation is asynchronous; results must be fetched once processing ends.
+    message_batch = client.messages.batches.create(
+        requests=requests,
+    )
+
+    print(message_batch)
+
+
+def batch_process_requests(batched_input: List[List[ModelMessage]], config: LLMConfig) -> List[str]:
+    if config.model_provider == ModelProvider.ANTHROPIC:
+        return _process_anthropic_batch(batched_input, config)
+    else:
+        raise ValueError("Unsupported model provider for batch request processing")