Update app.py
app.py CHANGED
@@ -9,20 +9,16 @@ class LoRAInferenceWrapper:
         self.client = InferenceClient(model_id, token=token)
 
     def load_lora_weights(self):
-
-
-
-
-
-
-
-
-
-
-
-        # Load the LoRA weights from the local file
-        with open(lora_model_path, "rb") as f:
-            return f.read()  # Return the raw bytes of the LoRA file
+        # Define the path to the LoRA model
+        lora_model_path = "./lora.model.pth"  # Update to the actual file name
+
+        # Check if the file exists at the given path
+        if os.path.exists(lora_model_path):
+            print(f"Found LoRA model at: {lora_model_path}")
+            with open(lora_model_path, 'rb') as f:
+                return f.read()  # Load the file content
+        else:
+            raise FileNotFoundError(f"LoRA model not found at path: {lora_model_path}")
 
     def preprocess_lora_weights(self, lora_weights):
         # Preprocess the LoRA weights (e.g., Base64 encoding for JSON compatibility)
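The new load_lora_weights branch calls os.path.exists, so it assumes import os is already present at the top of app.py (not visible in this hunk). The hunk also ends before the body of preprocess_lora_weights; going only by its comment about Base64 encoding for JSON compatibility, a minimal standalone sketch of that step could look like the following. The encode_lora_weights helper and the payload key are illustrative assumptions, not part of this commit:

import base64

def encode_lora_weights(lora_weights: bytes) -> str:
    # Base64-encode the raw .pth bytes so they can travel inside a JSON request body
    return base64.b64encode(lora_weights).decode("utf-8")

# Illustrative usage: read the same file load_lora_weights() checks for, then encode it
with open("./lora.model.pth", "rb") as f:
    raw_bytes = f.read()

payload = {"lora_weights": encode_lora_weights(raw_bytes)}  # JSON-safe string value

On the receiving side, base64.b64decode(payload["lora_weights"]) would restore the original bytes.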