Bonosa2 committed
Commit ed48c92 · verified · 1 Parent(s): 0888006

Update app.py

Files changed (1)
app.py +21 -16
app.py CHANGED
@@ -1,10 +1,4 @@
- # -*- coding: utf-8 -*-
- """gemma_3n_colab.ipynb
-
- Automatically generated by Colab.
-
- Original file is located at
-     https://colab.research.google.com/drive/1U5pbaYG8qD7HFANwU7PI1jbLGOPvArnP
+ # -*- coding: utf-8

  # 🏥 Gemma 3N SOAP Note Generator
  ## Interactive Medical Documentation Assistant
@@ -12,11 +6,6 @@ Original file is located at
  This notebook provides a complete interface for generating SOAP notes from medical text using the Gemma 3N model.
  """

- # Install required packages
- !pip install -q transformers torch torchvision torchaudio timm accelerate
- !pip install -q ipywidgets gradio
- !pip install -q --upgrade huggingface_hub
- !pip install GPUtil

  # Enable widgets
  from IPython.display import display, HTML
@@ -34,18 +23,35 @@ from datetime import datetime
  from huggingface_hub import login
  import getpass

+ # Authenticate with HuggingFace
+ # Replace the authentication section (lines around the getpass part) with this:
+
+ # Import libraries and authenticate
+ import torch
+ from transformers import AutoProcessor, AutoModelForImageTextToText
+ import gradio as gr
+ import ipywidgets as widgets
+ from IPython.display import display, clear_output
+ import io
+ import base64
+ from datetime import datetime
+ from huggingface_hub import login
+ import os
+
  # Authenticate with HuggingFace
  print("🔐 HuggingFace Authentication Required")
- print("Please enter your HuggingFace token (it will be hidden):")
- hf_token = getpass.getpass("HF Token: ")

+ # Try to get token from environment variable first (for production/HF Spaces)
+ hf_token = os.environ.get('HF_TOKEN') or os.environ.get('HUGGINGFACE_TOKEN')
+
+ if hf_token:
+     print("✅ Found HF token in environment variables")
  try:
      login(token=hf_token)
      print("✅ Successfully authenticated with HuggingFace!")
  except Exception as e:
      print(f"❌ Authentication failed: {e}")
      print("Please check your token and try again.")
-
  # Check GPU availability
  device = "cuda" if torch.cuda.is_available() else "cpu"
  print(f"🖥️ Using device: {device}")
@@ -703,4 +709,3 @@ show_system_info()
  *🤖 Powered by Google's Gemma 3N Model | 🔒 All processing performed locally*
  """

- !gradio deploy
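
The change replaces the interactive getpass prompt with a lookup of the HF_TOKEN or HUGGINGFACE_TOKEN environment variables, which is how a Hugging Face Space exposes its repository secrets to the running app. A minimal sketch of the same lookup for running the script outside a Space, with an interactive fallback, is shown below; the fallback prompt and this standalone snippet are illustrative and not part of the commit.

```python
# Illustrative sketch only (not part of this commit): reuse the commit's
# environment-variable token lookup, falling back to an interactive prompt
# when running locally where no HF_TOKEN secret is set.
import os
import getpass

from huggingface_hub import login

# On Hugging Face Spaces, a secret named HF_TOKEN is surfaced to the app as an
# environment variable, so no interactive input is needed there.
hf_token = os.environ.get("HF_TOKEN") or os.environ.get("HUGGINGFACE_TOKEN")

if not hf_token:
    # Local fallback: prompt for the token without echoing it to the terminal.
    hf_token = getpass.getpass("HF Token: ")

# Authenticate against the Hugging Face Hub; raises if the token is invalid.
login(token=hf_token)
```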