"""Fake Chat Model wrapper for testing purposes."""
from typing import Any, List, Mapping, Optional

from langchain.callbacks.manager import (
    AsyncCallbackManagerForLLMRun,
    CallbackManagerForLLMRun,
)
from langchain.chat_models.base import SimpleChatModel
from langchain.schema import AIMessage, BaseMessage, ChatGeneration, ChatResult
class FakeChatModel(SimpleChatModel):
    """Fake Chat Model wrapper for testing purposes.

    Always produces the fixed string ``"fake response"`` regardless of the
    input messages, so tests can exercise chat-model plumbing without any
    real LLM calls.
    """

    def _call(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
    ) -> str:
        """Return the canned response string; ignores all arguments."""
        return "fake response"

    async def _agenerate(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
    ) -> ChatResult:
        """Async generation: wrap the canned response in a ChatResult.

        Mirrors ``_call`` but returns the full ``ChatResult`` structure,
        since ``SimpleChatModel`` does not provide an async path for us.
        """
        output_str = "fake response"
        message = AIMessage(content=output_str)
        generation = ChatGeneration(message=message)
        return ChatResult(generations=[generation])

    # NOTE: the base LangChain model classes declare _llm_type and
    # _identifying_params as properties; overriding them as plain methods
    # would hand framework code a bound method where it expects a value.
    @property
    def _llm_type(self) -> str:
        """Identifier for this model type, used in serialization/logging."""
        return "fake-chat-model"

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Parameters that identify this (fake) model instance."""
        return {"key": "fake"}