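"""Unofficial HuggingChat automation sketch.

Drives the https://huggingface.co/chat/ web UI with Playwright to select a
model, set a system prompt, toggle tools, and exchange messages. The selectors
below come from a recorded browsing session and may break whenever the UI
changes. HUGGINGFACE_USERNAME and HUGGINGFACE_PASSWORD must be set in the
environment before running.
"""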
import asyncio
import json
import os  # used to read credentials from environment variables
from playwright.async_api import async_playwright


class HuggingChatAPI:
    def __init__(self, headless=True):
        self.username = os.environ.get("HUGGINGFACE_USERNAME")
        self.password = os.environ.get("HUGGINGFACE_PASSWORD")
        if not self.username or not self.password:
            raise ValueError("HUGGINGFACE_USERNAME and HUGGINGFACE_PASSWORD environment variables must be set.")

        self.headless = headless
        self.playwright = None
        self.browser = None
        self.page = None
        self.current_model = None
        self.conversation_history = []  # Store conversation history

    async def start(self):
        self.playwright = await async_playwright().start()
        self.browser = await self.playwright.chromium.launch(headless=self.headless)
        self.page = await self.browser.new_page()
        await self.page.set_viewport_size({"width": 1161, "height": 813})
        await self.login()

    async def stop(self):
        if self.browser:
            await self.browser.close()
        if self.playwright:
            await self.playwright.stop()

    async def login(self):
        """Logs in via the "Sign in with Hugging Face" flow.

        Accessible names are kept verbatim from the recorded session; the
        element roles are assumptions and may need adjusting if the UI changes.
        """
        await self.page.goto("https://huggingface.co/chat/")
        await self.page.locator('body > div:nth-of-type(2) span').click()  # "with Hugging Face" button (brittle CSS)
        await self.page.get_by_role("button", name="Sign in with Hugging Face").click()
        await self.page.get_by_label("Username or Email address").fill(self.username)
        await self.page.get_by_label("Password").fill(self.password)
        await self.page.get_by_role("button", name="Login").click()
        # Wait until we are redirected back to the chat page after a successful login.
        await self.page.wait_for_url("https://huggingface.co/chat/")


    # Models that were selectable in the HuggingChat picker when this flow was recorded.
    SUPPORTED_MODELS = (
        "meta-llama/Llama-3.3-70B-Instruct",
        "Qwen/Qwen2.5-72B-Instruct",
        "CohereForAI/c4ai-command-r-plus-08-2024",
        "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
        "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF",
        "Qwen/QwQ-32B-Preview",
        "Qwen/Qwen2.5-Coder-32B-Instruct",
        "meta-llama/Llama-3.2-11B-Vision-Instruct",
        "NousResearch/Hermes-3-Llama-3.1-8B",
        "mistralai/Mistral-Nemo-Instruct-2407",
        "microsoft/Phi-3.5-mini-instruct",
    )

    async def select_model(self, model_name):
        """Selects a model by its exact repository id (one of SUPPORTED_MODELS)."""
        if model_name not in self.SUPPORTED_MODELS:
            raise ValueError(f"Model '{model_name}' not found in the predefined list.")

        # Open the model picker; the accessible name in the recording was "Models 11"
        # (the trailing number is the model count), so a substring match is used here.
        await self.page.get_by_role("link", name="Models").click()
        # Each model card exposes a "View details for <model>" accessible name
        # (role assumed to be a link).
        await self.page.get_by_role("link", name=f"View details for {model_name}").click()
        self.current_model = model_name
        
    async def set_system_prompt(self, system_prompt):
        """Sets a custom system prompt for the current model via the settings panel."""
        # Open the model settings panel (brittle CSS selector from the recording).
        await self.page.locator("div.p-3 path").click()
        await self.page.get_by_label("Custom system prompt").fill(system_prompt)
        # Start a new chat so the prompt takes effect (role assumed to be a link).
        await self.page.get_by_role("link", name="New chat").click()


    async def toggle_web_search(self, enable: bool):
        """Enable or disable web search from the tools menu.

        Note: the recorded flow operates on the tool button labelled "Code Executor";
        if your UI labels the web-search tool differently, adjust the name below.
        """
        await self.page.get_by_role("button", name="Search").click()  # open the tools menu
        tool_button = self.page.get_by_role("button", name="Code Executor")
        tool_classes = await tool_button.get_attribute("class") or ""

        if enable:
            # Active tools carry the bg-gray-100 class; only click if not already active.
            if "bg-gray-100" not in tool_classes:
                await tool_button.click()
        else:
            # Only deactivate if the tool is currently active.
            if "bg-gray-100" in tool_classes:
                await tool_button.click()  # open the tool's popover
                await self.page.get_by_role("button", name="Deactivate").click()
        # Close the tools menu (brittle CSS selector from the recording).
        await self.page.locator('div:nth-of-type(2) > div > div > button path').click()



    async def send_message(self, message: str, expect_response: bool = True, timeout: float = 60.0):
        """Sends a message and optionally waits for the model's response."""
        self.conversation_history.append({"role": "user", "content": message})
        await self.page.get_by_placeholder("Ask anything").fill(message)
        await self.page.get_by_role("button", name="Send message").click()

        if not expect_response:
            return None

        try:
            # Wait for a new message bubble to appear; this is more robust than
            # sleeping for a fixed amount of time. The CSS classes come from the
            # recorded page and may change with the UI.
            await self.page.locator('.group.self-end').last.wait_for(timeout=timeout * 1000)  # milliseconds
            response_elements = await self.page.locator('.group.self-end > .markdown').all()
            response = await response_elements[-1].inner_text()  # last response in the thread
            self.conversation_history.append({"role": "assistant", "content": response})
            return response
        except Exception as e:
            print(f"Error waiting for response: {e}")
            return None  # or re-raise, depending on your needs



    async def new_chat(self):
        """Starts a new chat and clears the local conversation history."""
        await self.page.get_by_role("link", name="New chat").click()  # role assumed to be a link
        self.conversation_history = []


    async def chat(self, message: str, model_name: str = None, system_prompt: str = None, web_search: bool = None, timeout: float = 60.0):
        """
        Combined method for setting parameters and sending a message.

        Args:
            message: The user's message.
            model_name: The name of the model to use; switching models starts a new chat.
            system_prompt: The system prompt to set (an empty string is allowed).
            web_search: Whether to enable web search; leave as None to keep the current setting.
            timeout: Timeout for waiting for a response, in seconds.

        Returns:
            The model's response, or None if no response is received.
        """
        if model_name and model_name != self.current_model:
            await self.new_chat()  # start a new chat when changing models
            await self.select_model(model_name)

        if system_prompt is not None:  # allow "" as a valid (empty) prompt
            await self.set_system_prompt(system_prompt)

        if web_search is not None:
            await self.toggle_web_search(web_search)

        return await self.send_message(message, timeout=timeout)


async def main():
    # --- Example Usage ---
    chat_api = HuggingChatAPI(headless=False)  # headless=False lets you watch the browser while debugging
    try:
        await chat_api.start()

        # Example 1: Simple chat with default model
        response1 = await chat_api.chat("What is the capital of France?")
        print(f"Response 1: {response1}")

        # Example 2: Chat with a specific model and system prompt
        response2 = await chat_api.chat("Explain the theory of relativity.",
                                        model_name="Qwen/Qwen2.5-72B-Instruct",
                                        system_prompt="You are a helpful and concise assistant.")
        print(f"Response 2: {response2}")

        # Example 3: Enable web search
        response3 = await chat_api.chat("What is the latest news on AI?",
                                        model_name="CohereForAI/c4ai-command-r-plus-08-2024",
                                        web_search=True)
        print(f"Response 3: {response3}")

        # Example 4: Continue the current conversation (no model/prompt change)
        response4 = await chat_api.chat("And what about Germany?")
        print(f"Response 4: {response4}")

        # Example 5: Get conversation history
        print("\nConversation History:")
        for message in chat_api.conversation_history:
            print(f"- {message['role']}: {message['content']}")


    finally:
        await chat_api.stop()

if __name__ == "__main__":
    asyncio.run(main())
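
# Example environment setup before running this script (placeholder values):
#
#   export HUGGINGFACE_USERNAME="your-username"
#   export HUGGINGFACE_PASSWORD="your-password"
#
# The script drives a real browser session, so it is slower and more fragile
# than an official API; run with headless=False first to watch the flow.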