Update app.py
app.py CHANGED
@@ -1,10 +1,4 @@
-# -*- coding: utf-8
-"""gemma_3n_colab.ipynb
-
-Automatically generated by Colab.
-
-Original file is located at
-    https://colab.research.google.com/drive/1U5pbaYG8qD7HFANwU7PI1jbLGOPvArnP
+# -*- coding: utf-8

 # 🏥 Gemma 3N SOAP Note Generator
 ## Interactive Medical Documentation Assistant
@@ -12,11 +6,6 @@ Original file is located at
 This notebook provides a complete interface for generating SOAP notes from medical text using the Gemma 3N model.
 """

-# Install required packages
-!pip install -q transformers torch torchvision torchaudio timm accelerate
-!pip install -q ipywidgets gradio
-!pip install -q --upgrade huggingface_hub
-!pip install GPUtil

 # Enable widgets
 from IPython.display import display, HTML
@@ -34,18 +23,35 @@ from datetime import datetime
 from huggingface_hub import login
 import getpass

+# Authenticate with HuggingFace
+# Replace the authentication section (lines around the getpass part) with this:
+
+# Import libraries and authenticate
+import torch
+from transformers import AutoProcessor, AutoModelForImageTextToText
+import gradio as gr
+import ipywidgets as widgets
+from IPython.display import display, clear_output
+import io
+import base64
+from datetime import datetime
+from huggingface_hub import login
+import os
+
 # Authenticate with HuggingFace
 print("🔐 HuggingFace Authentication Required")
-print("Please enter your HuggingFace token (it will be hidden):")
-hf_token = getpass.getpass("HF Token: ")

+# Try to get token from environment variable first (for production/HF Spaces)
+hf_token = os.environ.get('HF_TOKEN') or os.environ.get('HUGGINGFACE_TOKEN')
+
+if hf_token:
+    print("✅ Found HF token in environment variables")
 try:
     login(token=hf_token)
     print("✅ Successfully authenticated with HuggingFace!")
 except Exception as e:
     print(f"❌ Authentication failed: {e}")
     print("Please check your token and try again.")
-
 # Check GPU availability
 device = "cuda" if torch.cuda.is_available() else "cpu"
 print(f"🖥️ Using device: {device}")
@@ -703,4 +709,3 @@ show_system_info()
 *🤖 Powered by Google's Gemma 3N Model | 🔒 All processing performed locally*
 """

-!gradio deploy
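The change swaps the interactive `getpass` prompt for a token read from the environment, so the Space can authenticate without user input. A minimal sketch of that pattern, combining the environment-variable lookup added in this commit with the prompt it removes as a local fallback (the `if not hf_token` guard is illustrative and not part of the commit):

```python
import os
import getpass

from huggingface_hub import login

# Prefer a token supplied by the environment, e.g. a Space secret named HF_TOKEN.
hf_token = os.environ.get("HF_TOKEN") or os.environ.get("HUGGINGFACE_TOKEN")

if not hf_token:
    # Fall back to an interactive prompt when running the notebook locally.
    hf_token = getpass.getpass("HF Token: ")

try:
    # Authenticate the huggingface_hub client (needed for gated models such as Gemma).
    login(token=hf_token)
    print("✅ Successfully authenticated with HuggingFace!")
except Exception as e:
    print(f"❌ Authentication failed: {e}")
    print("Please check your token and try again.")
```

On a Space, the token would typically be stored as a repository secret (for example `HF_TOKEN`) so that it reaches `app.py` as an environment variable rather than being typed in or hard-coded.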