Commit: add
--- a/app.py
+++ b/app.py
@@ -3,7 +3,7 @@ import torch
 from transformers import AutoTokenizer, AutoModelForCausalLM
 import gradio as gr
 from peft import PeftModel
-
+import spaces
 
 # Define the base model ID
 base_model_id = "meta-llama/Llama-2-13b-hf"
@@ -17,7 +17,7 @@ if not huggingface_token:
 base_model = AutoModelForCausalLM.from_pretrained(
     base_model_id,
     trust_remote_code=True,
-
+    use_auth_token=huggingface_token  # Use the correct parameter
 ).to("cuda")  # Move model to CUDA
 
 # Load the tokenizer
@@ -25,7 +25,7 @@ tokenizer = AutoTokenizer.from_pretrained(
     base_model_id,
     add_bos_token=True,
     trust_remote_code=True,
-
+    use_auth_token=huggingface_token
 )
 
 # Load the fine-tuned model and move to CUDA
@@ -54,6 +54,9 @@ def generate_skills(job_description):
     else:
         skills_text = generated_text[skills_start_index:].strip()
 
+    # Clear CUDA memory
+    torch.cuda.empty_cache()
+
     return skills_text
 
 # Define the Gradio interface
@@ -61,4 +64,4 @@ inputs = gr.Textbox(lines=10, label="Job description:", placeholder="Enter or pa
 outputs = gr.Textbox(label="Required skills:", placeholder="The required skills will be displayed here...")
 
 gr.Interface(fn=generate_skills, inputs=inputs, outputs=outputs, title="Job Skills Analysis",
-             description="Paste the job description in the text box below and the model will show the required skills for candidates.").launch()
+             description="Paste the job description in the text box below and the model will show the required skills for candidates.").launch(share=True)
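The new import spaces line pulls in Hugging Face's spaces package, which is used on ZeroGPU Spaces. None of the hunks above actually use it, so the wiring below is an assumption: on ZeroGPU hardware, the function that touches CUDA is typically wrapped with the @spaces.GPU decorator, roughly as in this sketch (the body is elided; only the decorator placement is the point):

import spaces
import torch

@spaces.GPU  # on ZeroGPU Spaces, requests a GPU for the duration of this call
def generate_skills(job_description):
    # ... tokenize, call model.generate(), and slice out the skills
    # section exactly as app.py already does ...
    skills_text = "..."  # placeholder for the extracted skills
    torch.cuda.empty_cache()  # clear CUDA memory between requests, as this commit adds
    return skills_text

On non-ZeroGPU hardware the bare import should be harmless, so adding it without the decorator does not change behavior.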
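One caveat on the two new use_auth_token=huggingface_token arguments: recent transformers releases deprecate use_auth_token in favor of token. A minimal sketch of the same loading steps under that assumption (the HF_TOKEN secret name is hypothetical; the hunks only show that a huggingface_token variable exists):

import os
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

base_model_id = "meta-llama/Llama-2-13b-hf"
huggingface_token = os.environ.get("HF_TOKEN")  # hypothetical secret name

# `token` replaces the deprecated `use_auth_token` on newer transformers
base_model = AutoModelForCausalLM.from_pretrained(
    base_model_id,
    trust_remote_code=True,
    token=huggingface_token,
).to("cuda")

tokenizer = AutoTokenizer.from_pretrained(
    base_model_id,
    add_bos_token=True,
    trust_remote_code=True,
    token=huggingface_token,
)

Likewise, launch(share=True) only matters when the app runs outside Spaces; inside a Space, Gradio serves the app directly and the share tunnel is not created.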