Update app.py
app.py CHANGED
@@ -8,9 +8,9 @@ import pytesseract
 from pdf2image import convert_from_path
 from huggingface_hub import InferenceClient
 
-# Initialize
+# Initialize with a reliable free model that supports text-generation
 hf_token = os.getenv("HF_TOKEN")
-client = InferenceClient(model="mistralai/
+client = InferenceClient(model="mistralai/Mistral-7B-Instruct-v0.2", token=hf_token)
 
 def extract_excel_data(file_path):
     """Extract text from Excel file"""
@@ -93,11 +93,11 @@ RULES:
 """
 
     try:
-        # Call LLM
+        # Call LLM via Hugging Face Inference API
         response = client.text_generation(
             prompt,
             max_new_tokens=2000,
-            temperature=0.01,
+            temperature=0.01,
             stop_sequences=["</s>"]
         )
         print(f"LLM Response: {response}")
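For reference, the pattern this commit lands on can be exercised outside the Space. Below is a minimal sketch, assuming HF_TOKEN is exported in the environment, that the serverless Inference API serves mistralai/Mistral-7B-Instruct-v0.2 for text-generation, and that the [INST]-wrapped prompt is only an illustrative placeholder; the decoding parameters mirror the diff.

import os

from huggingface_hub import InferenceClient

# Assumption: HF_TOKEN is set in the environment, as in the Space.
hf_token = os.getenv("HF_TOKEN")

# Same initialization as the new line 13 of app.py.
client = InferenceClient(model="mistralai/Mistral-7B-Instruct-v0.2", token=hf_token)

# Hypothetical prompt; Mistral instruct models expect the [INST] wrapper.
prompt = "<s>[INST] Extract the invoice total from: Total due in 30 days: $1,250 [/INST]"

# Mirrors the call in the second hunk: near-greedy decoding and an
# explicit stop sequence on the Mistral end-of-sequence token.
response = client.text_generation(
    prompt,
    max_new_tokens=2000,
    temperature=0.01,
    stop_sequences=["</s>"],
)
print(f"LLM Response: {response}")

Keeping temperature at 0.01 rather than 0 is a common workaround here, since the hosted text-generation backend typically rejects a temperature of exactly zero.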