Suriya13 committed
Commit 448071f · verified · 1 parent: 1c90314

Rename app.java to app.py

Files changed (2)
  1. app.java +0 -34
  2. app.py +27 -0
app.java DELETED
@@ -1,34 +0,0 @@
- import java.net.URI;
- import java.net.http.HttpClient;
- import java.net.http.HttpRequest;
- import java.net.http.HttpResponse;
- import java.net.http.HttpRequest.BodyPublishers;
-
- public class MistralChatbot {
-
-     private static final String API_URL = "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.1";
-     private static final String API_TOKEN = "YOUR_HF_API_TOKEN";
-
-     public static void main(String[] args) throws Exception {
-         String prompt = "Explain the difference between Java and Python.";
-         String response = sendPromptToModel(prompt);
-         System.out.println("AI Response:\n" + response);
-     }
-
-     public static String sendPromptToModel(String prompt) throws Exception {
-         HttpClient client = HttpClient.newHttpClient();
-
-         String requestBody = "{ \"inputs\": \"" + prompt.replace("\"", "\\\"") + "\" }";
-
-         HttpRequest request = HttpRequest.newBuilder()
-                 .uri(URI.create(API_URL))
-                 .header("Authorization", "Bearer " + API_TOKEN)
-                 .header("Content-Type", "application/json")
-                 .POST(BodyPublishers.ofString(requestBody))
-                 .build();
-
-         HttpResponse<String> response = client.send(request, HttpResponse.BodyHandlers.ofString());
-
-         return response.body();
-     }
- }
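For context, the deleted Java file was only a thin HTTP client for the hosted Inference API; it never ran the model itself. A rough Python equivalent of that call, shown purely as a sketch (it assumes the requests package and the same placeholder API token, neither of which is part of this commit), would be:

import requests  # assumed dependency, not included in this repo

API_URL = "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.1"
HEADERS = {"Authorization": "Bearer YOUR_HF_API_TOKEN"}  # placeholder token, as in the Java code

def send_prompt_to_model(prompt: str) -> str:
    # Same JSON payload the Java client built by hand: {"inputs": "<prompt>"}
    response = requests.post(API_URL, headers=HEADERS, json={"inputs": prompt}, timeout=60)
    response.raise_for_status()
    return response.text  # the Java version likewise returned the raw response body

The new app.py below drops this remote call and loads the model locally through transformers instead.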
app.py ADDED
@@ -0,0 +1,27 @@
+ import gradio as gr
+ from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
+ import torch
+
+ model_id = "mistralai/Mistral-7B-Instruct-v0.1"
+
+ tokenizer = AutoTokenizer.from_pretrained(model_id)
+ model = AutoModelForCausalLM.from_pretrained(
+     model_id,
+     device_map="auto",
+     torch_dtype=torch.float16,
+     load_in_4bit=True
+ )
+
+ pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
+
+ def chat(prompt, history=[]):
+     full_prompt = prompt
+     output = pipe(full_prompt, max_new_tokens=200, do_sample=True, temperature=0.7)
+     return output[0]["generated_text"]
+
+ gr.ChatInterface(
+     fn=chat,
+     title="🧠 Mistral 7B Instruct Chatbot",
+     description="This chatbot is powered by the open-source Mistral 7B LLM. Ask anything!",
+     theme="soft"
+ ).launch()
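Note that the committed chat() passes the raw prompt straight to the pipeline, while Mistral-7B-Instruct is normally prompted through its [INST] ... [/INST] chat template. A minimal variant that applies the template via the tokenizer (a sketch, not part of this commit) could look like:

def chat(prompt, history=None):
    # Wrap the user message in the model's chat template instead of
    # sending the raw string, which the committed app.py does.
    messages = [{"role": "user", "content": prompt}]
    full_prompt = tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    output = pipe(full_prompt, max_new_tokens=200, do_sample=True,
                  temperature=0.7, return_full_text=False)
    return output[0]["generated_text"]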
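Also worth noting: recent transformers releases deprecate passing load_in_4bit=True directly to from_pretrained in favour of an explicit BitsAndBytesConfig (4-bit loading still requires the bitsandbytes package and a CUDA GPU). A sketch of the updated loading call, under those assumptions:

from transformers import BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.float16,  # takes over the float16 compute hint
)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    device_map="auto",
    quantization_config=bnb_config,  # instead of the bare load_in_4bit=True
)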