Update app.py
app.py CHANGED
```diff
@@ -6,6 +6,7 @@ import gradio as gr
 import spaces
 import torch
 from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
+from peft import AutoPeftModelForCausalLM
 
 DESCRIPTION = """\
 # Llama 3.2 3B Instruct
@@ -18,20 +19,36 @@ For more details, please check [our post](https://huggingface.co/blog/llama32).
 MAX_MAX_NEW_TOKENS = 2048
 DEFAULT_MAX_NEW_TOKENS = 1024
 MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
-
+HF_TOKEN = os.getenv("HF_TOKEN")
 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 
-
-
+model_name = "tiiuae/Falcon3-10B-Instruct"
+
 model = AutoModelForCausalLM.from_pretrained(
-
-
-
+    model_name,
+    torch_dtype="auto",
+    device_map="auto"
 )
-
+tokenizer = AutoTokenizer.from_pretrained(model_name)
+
+peft_model = AutoPeftModelForCausalLM.from_pretrained("ehristoforu/falconthink-10b-lora")
+merged_model = peft_model.merge_and_unload()
+merged_model.save_pretrained("./falconthink")
+
+from huggingface_hub import HfApi
 
+api = HfApi()
+
+
+
+api.upload_folder(
+    folder_path="./falconthink",
+    repo_id="ehristoforu/FalconThink-10B-IT",
+    repo_type="model",
+    token=HF_TOKEN,
+)
 
-@spaces.GPU(duration=
+@spaces.GPU(duration=240)
 def generate(
     message: str,
     chat_history: list[tuple[str, str]],
```
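The net effect of the commit: the Space still presents itself as the Llama 3.2 3B Instruct demo, but at import time it now loads tiiuae/Falcon3-10B-Instruct, merges the ehristoforu/falconthink-10b-lora adapter into it, saves the merged weights to ./falconthink, and uploads them to the Hub. Worth noting: `AutoPeftModelForCausalLM.from_pretrained` downloads and instantiates the base model named in the adapter's own config, so the `model` created a few lines earlier is a second, unused copy of the 10B weights. A minimal sketch of the merge step alone; the `torch_dtype` argument and the tokenizer export are assumptions layered on top of the diff, not part of it:

```python
# Minimal sketch of the merge step, assuming the adapter's
# adapter_config.json points at the intended Falcon3 base model.
# Repo IDs come from the diff; torch_dtype and the tokenizer export
# are illustrative additions.
import torch
from peft import AutoPeftModelForCausalLM
from transformers import AutoTokenizer

# Loads the base model referenced by the adapter config and attaches
# the LoRA weights on top of it, so no separate base-model load is needed.
peft_model = AutoPeftModelForCausalLM.from_pretrained(
    "ehristoforu/falconthink-10b-lora",
    torch_dtype=torch.bfloat16,
)

# Fold the low-rank deltas into the base weights and drop the PEFT
# wrappers, leaving a plain transformers model.
merged_model = peft_model.merge_and_unload()
merged_model.save_pretrained("./falconthink")

# The diff saves only the model; exporting the tokenizer as well makes
# the ./falconthink folder loadable on its own.
tokenizer = AutoTokenizer.from_pretrained("tiiuae/Falcon3-10B-Instruct")
tokenizer.save_pretrained("./falconthink")
```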
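The merged folder is then pushed to ehristoforu/FalconThink-10B-IT with `HfApi.upload_folder`, authenticated by the `HF_TOKEN` Space secret. A sketch of that step; the `create_repo(..., exist_ok=True)` guard is an assumed safeguard for the first run, which the diff itself does not include:

```python
# Sketch of the upload step. The token is read from the HF_TOKEN Space
# secret, as in the diff; create_repo(exist_ok=True) makes the script
# work whether or not the target repo already exists.
import os
from huggingface_hub import HfApi

HF_TOKEN = os.getenv("HF_TOKEN")  # needs write scope on the target repo

api = HfApi(token=HF_TOKEN)
api.create_repo("ehristoforu/FalconThink-10B-IT", repo_type="model", exist_ok=True)
api.upload_folder(
    folder_path="./falconthink",
    repo_id="ehristoforu/FalconThink-10B-IT",
    repo_type="model",
)
```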
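Finally, the `@spaces.GPU` duration on `generate` is raised to 240 seconds. On ZeroGPU Spaces, only code inside a `@spaces.GPU`-decorated function gets a GPU; the module-level merge and upload above run on CPU, once per restart, before Gradio starts serving. A hedged sketch of a streaming `generate` that matches the `TextIteratorStreamer` import and the signature shown in the diff; the body is illustrative, not the Space's actual implementation:

```python
# Illustrative body for the decorated chat function. `model` and
# `tokenizer` are the module-level objects created in the diff.
from threading import Thread

import spaces
from transformers import TextIteratorStreamer


@spaces.GPU(duration=240)  # request a ZeroGPU slot for up to 240 s per call
def generate(message: str, chat_history: list[tuple[str, str]], max_new_tokens: int = 1024):
    # Rebuild the conversation in chat format, ending with the new user turn.
    conversation = []
    for user, assistant in chat_history:
        conversation.append({"role": "user", "content": user})
        conversation.append({"role": "assistant", "content": assistant})
    conversation.append({"role": "user", "content": message})

    input_ids = tokenizer.apply_chat_template(
        conversation, add_generation_prompt=True, return_tensors="pt"
    ).to(model.device)

    # Run generation on a background thread and yield partial text as it
    # arrives, so Gradio can stream the reply.
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
    thread = Thread(
        target=model.generate,
        kwargs={
            "input_ids": input_ids,
            "streamer": streamer,
            "max_new_tokens": max_new_tokens,
        },
    )
    thread.start()

    outputs = []
    for text in streamer:
        outputs.append(text)
        yield "".join(outputs)
```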