rashedalhuniti committed on
Commit
3e8a64e
·
verified ·
1 Parent(s): e770142

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +17 -20
app.py CHANGED
@@ -1,24 +1,21 @@
1
- import os
2
- from huggingface_hub import InferenceClient
 
3
 
4
- # Set your Hugging Face API key as an environment variable for security
5
- os.environ["HUGGINGFACE_API_KEY"] = "hf_xxxxxxxxxxxxxxxxxxxxxxxx"
 
 
6
 
7
- # Initialize the client without the 'provider' argument
8
- client = InferenceClient()
 
 
 
9
 
10
- messages = [
11
- {
12
- "role": "user",
13
- "content": "What is the capital of France?"
14
- }
15
- ]
16
 
17
- # Use the chat API with the specified model and messages
18
- completion = client.chat.completions.create(
19
- model="meta-llama/Llama-2-7b-chat-hf",
20
- messages=messages,
21
- max_tokens=500
22
- )
23
-
24
- print(completion.choices[0].message)
 
1
+ import gradio as gr
2
+ from transformers import AutoModelForCausalLM, AutoTokenizer
3
+ import torch
4
 
5
# Load the JAIS-13B tokenizer and model once at module import time so every
# request reuses the same weights.
model_name = "inceptionai/jais-13b"
tokenizer = AutoTokenizer.from_pretrained(model_name)
# fp16 halves the memory footprint; device_map="auto" lets accelerate decide
# placement — presumably a GPU is available for a 13B model (verify on the host).
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.float16,
    device_map="auto",
)
9
 
10
# Define chatbot function
def chat_with_jais(prompt):
    """Generate a JAIS-13B completion for a text prompt.

    Args:
        prompt: The user's input text.

    Returns:
        The decoded generated sequence (special tokens stripped). Note the
        decoded output of ``generate`` includes the prompt tokens as well.
    """
    # Fix: the model is placed by device_map="auto", which is not guaranteed
    # to be "cuda" (CPU-only host, or sharded placement). Send the inputs to
    # wherever the model actually lives instead of hard-coding "cuda".
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    # Inference only — disable autograd so no activation memory is retained.
    with torch.no_grad():
        # max_length=512 caps the TOTAL length (prompt + generation).
        outputs = model.generate(**inputs, max_length=512)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
15
 
16
# Wire the chat function into a minimal Gradio UI: one text box in,
# generated text out.
interface = gr.Interface(
    fn=chat_with_jais,
    inputs="text",
    outputs="text",
    title="JAIS-13B Chatbot",
)

# Start the web server only when executed as a script, not on import.
if __name__ == "__main__":
    interface.launch()