chore: minor updates
- app.py +8 -8
- requirements.txt +2 -1
app.py
CHANGED
@@ -38,7 +38,7 @@ model = Llama.from_pretrained(
|
|
38 |
**model_paths[DEFAULT_MODEL],
|
39 |
n_ctx=4096,
|
40 |
n_threads=4,
|
41 |
-
cache_dir='
|
42 |
)
|
43 |
|
44 |
def generate_alpaca_prompt(
|
@@ -105,13 +105,13 @@ def on_model_changed(model_name: str):
|
|
105 |
cache_dir='./hf-cache'
|
106 |
)
|
107 |
|
108 |
-
app_title_mark = gr.Markdown(f"""<center><font size=
|
109 |
chatbot = gr.Chatbot(
|
110 |
type='messages',
|
111 |
height=500,
|
112 |
-
placeholder='<strong>Hi, I have a headache, what should I do?</strong>',
|
113 |
label=model_name,
|
114 |
-
avatar_images=[None, '
|
115 |
)
|
116 |
return app_title_mark, chatbot
|
117 |
|
@@ -130,7 +130,7 @@ def main() -> None:
|
|
130 |
max_new_tokens = gr.Number(value=512, minimum=64, maximum=2048, label='Max new tokens')
|
131 |
|
132 |
with gr.Row():
|
133 |
-
temperature = gr.Slider(0, 2, step=0.01, label='Temperature', value=0.6
|
134 |
repeatition_penalty = gr.Slider(0.01, 5, step=0.05, label='Repetition penalty', value=1.1)
|
135 |
|
136 |
with gr.Row():
|
@@ -141,12 +141,12 @@ def main() -> None:
|
|
141 |
chatbot = gr.Chatbot(
|
142 |
type='messages',
|
143 |
height=500,
|
144 |
-
placeholder='<strong>Hi, I have a headache, what should I do?</strong>',
|
145 |
label=DEFAULT_MODEL,
|
146 |
-
avatar_images=[None, '
|
147 |
)
|
148 |
textbox = gr.Textbox(
|
149 |
-
placeholder='Hi, I have a headache, what should I do?',
|
150 |
container=False,
|
151 |
submit_btn=True,
|
152 |
stop_btn=True,
|
|
|
38 |
**model_paths[DEFAULT_MODEL],
|
39 |
n_ctx=4096,
|
40 |
n_threads=4,
|
41 |
+
cache_dir='./.hf_cache'
|
42 |
)
|
43 |
|
44 |
def generate_alpaca_prompt(
|
|
|
105 |
cache_dir='./hf-cache'
|
106 |
)
|
107 |
|
108 |
+
app_title_mark = gr.Markdown(f"""<center><font size=18>{model_name}</center>""")
|
109 |
chatbot = gr.Chatbot(
|
110 |
type='messages',
|
111 |
height=500,
|
112 |
+
placeholder='<strong>Hi doctor, I have a headache, what should I do?</strong>',
|
113 |
label=model_name,
|
114 |
+
avatar_images=[None, 'https://raw.githubusercontent.com/minhnguyent546/medical-llama2/demo/assets/med_alpaca.png'], # pyright: ignore[reportArgumentType]
|
115 |
)
|
116 |
return app_title_mark, chatbot
|
117 |
|
|
|
130 |
max_new_tokens = gr.Number(value=512, minimum=64, maximum=2048, label='Max new tokens')
|
131 |
|
132 |
with gr.Row():
|
133 |
+
temperature = gr.Slider(0, 2, step=0.01, label='Temperature', value=0.6)
|
134 |
repeatition_penalty = gr.Slider(0.01, 5, step=0.05, label='Repetition penalty', value=1.1)
|
135 |
|
136 |
with gr.Row():
|
|
|
141 |
chatbot = gr.Chatbot(
|
142 |
type='messages',
|
143 |
height=500,
|
144 |
+
placeholder='<strong>Hi doctor, I have a headache, what should I do?</strong>',
|
145 |
label=DEFAULT_MODEL,
|
146 |
+
avatar_images=[None, 'https://raw.githubusercontent.com/minhnguyent546/medical-llama2/demo/assets/med_alpaca.png'], # pyright: ignore[reportArgumentType]
|
147 |
)
|
148 |
textbox = gr.Textbox(
|
149 |
+
placeholder='Hi doctor, I have a headache, what should I do?',
|
150 |
container=False,
|
151 |
submit_btn=True,
|
152 |
stop_btn=True,
|
requirements.txt
CHANGED
@@ -1,3 +1,4 @@
|
|
1 |
gradio~=5.6.0
|
2 |
-
|
|
|
3 |
llama-cpp-python~=0.3.2
|
|
|
1 |
gradio~=5.6.0
|
2 |
+
hf_transfer~=0.1.8
|
3 |
+
huggingface_hub~=0.25.2
|
4 |
llama-cpp-python~=0.3.2
|