37-AN committed on
Commit 403ced7 · Parent: 207d24c

Fix 403 error by using local models

Files changed (4)
  1. Dockerfile +18 -7
  2. app/core/llm.py +42 -43
  3. deploy_to_hf.py +116 -49
  4. push_to_hf.py +66 -0
Dockerfile CHANGED
@@ -14,14 +14,15 @@ COPY requirements.txt .
 # Install Python dependencies
 RUN pip install --no-cache-dir -r requirements.txt
 
-# Create cache directories with proper permissions
+# Create all cache directories with proper permissions
 RUN mkdir -p /.cache && chmod 777 /.cache
 RUN mkdir -p /root/.cache && chmod 777 /root/.cache
 RUN mkdir -p /app/.cache && chmod 777 /app/.cache
+RUN mkdir -p /tmp/.cache && chmod 777 /tmp/.cache
+RUN mkdir -p /home/.cache && chmod 777 /home/.cache
 
-# Create models directory for Hugging Face
+# Create models directory with proper permissions
 RUN mkdir -p /app/models && chmod 777 /app/models
-ENV TRANSFORMERS_CACHE=/app/models
 
 # Copy the rest of the application
 COPY . .
@@ -30,18 +31,28 @@ COPY . .
 RUN mkdir -p data/documents data/vector_db && \
     chmod -R 777 data
 
-# Set environment variables
+# Set environment variables for cache locations
+ENV TRANSFORMERS_CACHE=/app/models
 ENV TOKENIZERS_PARALLELISM=false
 ENV HF_HOME=/app/.cache
 ENV XDG_CACHE_HOME=/app/.cache
 ENV HUGGINGFACEHUB_API_TOKEN=""
 ENV HF_API_KEY=""
-# Use completely open models that don't require API keys
+
+# Use small local models that don't require API access
+# distilgpt2 is a small model that works well locally
 ENV LLM_MODEL="distilgpt2"
+# all-MiniLM-L6-v2 is small and efficient for embeddings
 ENV EMBEDDING_MODEL="sentence-transformers/all-MiniLM-L6-v2"
 
-# Expose the port required by Hugging Face Spaces
+# Set moderate temperature and token limit
+ENV DEFAULT_TEMPERATURE=0.7
+ENV MAX_TOKENS=256
+ENV CHUNK_SIZE=512
+ENV CHUNK_OVERLAP=128
+
+# Expose port for Hugging Face Spaces
 EXPOSE 7860
 
-# Set the entrypoint command to run the Streamlit app on port 7860
+# Run the Streamlit app on the correct port
 CMD ["streamlit", "run", "app/ui/streamlit_app.py", "--server.port=7860", "--server.address=0.0.0.0"]
app/core/llm.py CHANGED
@@ -5,6 +5,11 @@ from langchain.chains import LLMChain
 from langchain.prompts import PromptTemplate
 import sys
 import os
+import logging
+
+# Configure logging
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
 
 # Add project root to path for imports
 sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
@@ -19,21 +24,39 @@ def get_llm():
         os.makedirs(cache_dir, exist_ok=True)
         os.chmod(cache_dir, 0o777)
     except Exception as e:
-        print(f"Warning: Could not create cache directory: {e}")
+        logger.warning(f"Could not create cache directory: {e}")
         cache_dir = None
 
-    # Set environment variable for Hugging Face Hub
-    os.environ["HUGGINGFACEHUB_API_TOKEN"] = HF_API_KEY
+    # Never rely on API key in Spaces environment
+    api_key = os.getenv("HUGGINGFACEHUB_API_TOKEN", "") or os.getenv("HF_API_KEY", "")
+    logger.info(f"Using model: {LLM_MODEL}")
 
-    # Try different approaches to load a model, from most to least sophisticated
+    # Always try local pipeline first (most reliable in Spaces)
    try:
-        print(f"Attempting to load model {LLM_MODEL} using local pipeline...")
+        from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
+
+        logger.info(f"Loading model {LLM_MODEL} as local pipeline")
 
-        # Try using Hugging Face pipeline locally
+        # Try loading with more specific model classes for better compatibility
         try:
-            from transformers import pipeline
+            # Load tokenizer and model explicitly
+            tokenizer = AutoTokenizer.from_pretrained(LLM_MODEL)
+            model = AutoModelForCausalLM.from_pretrained(LLM_MODEL)
+
+            # Create pipeline with loaded components
+            pipe = pipeline(
+                "text-generation",
+                model=model,
+                tokenizer=tokenizer,
+                max_length=MAX_TOKENS,
+                temperature=DEFAULT_TEMPERATURE
+            )
+
+            return HuggingFacePipeline(pipeline=pipe)
+        except Exception as e:
+            logger.warning(f"Error loading with explicit model/tokenizer: {e}")
 
-            # Use a simple pipeline with a small model
+            # Fallback to simpler pipeline instantiation
             pipe = pipeline(
                 "text-generation",
                 model=LLM_MODEL,
@@ -42,44 +65,18 @@ def get_llm():
             )
 
             return HuggingFacePipeline(pipeline=pipe)
-        except Exception as pipe_error:
-            print(f"Error loading pipeline: {pipe_error}")
 
-            # Try using the API if we have a token
-            if HF_API_KEY:
-                print("Falling back to API with auth token...")
-                return HuggingFaceHub(
-                    huggingfacehub_api_token=HF_API_KEY,
-                    repo_id=LLM_MODEL,
-                    model_kwargs={
-                        "temperature": DEFAULT_TEMPERATURE,
-                        "max_length": MAX_TOKENS
-                    }
-                )
-            else:
-                print("No API key, using endpoint without auth...")
-                # Try a simple endpoint without auth
-                return HuggingFaceEndpoint(
-                    endpoint_url=f"https://api-inference.huggingface.co/models/{LLM_MODEL}",
-                    task="text-generation",
-                    model_kwargs={
-                        "temperature": DEFAULT_TEMPERATURE,
-                        "max_length": MAX_TOKENS
-                    }
-                )
     except Exception as e:
-        print(f"All LLM approaches failed: {e}")
-        print("Using a fallback mock LLM.")
+        logger.warning(f"Error creating local pipeline: {e}")
 
-        # Create a very simple mock LLM for fallback
+        # Last resort - mock LLM for fallback
         from langchain.llms.fake import FakeListLLM
+        logger.warning("Using mock LLM as fallback")
         return FakeListLLM(
             responses=[
-                "I'm a simple AI assistant. I can't access external knowledge right now, but I'll try to help with basic questions.",
-                "I'm currently operating in a limited mode. How else can I assist you?",
-                "I'm sorry, but I don't have access to that information at the moment.",
-                "I'm a basic AI assistant running in fallback mode. Let me try to help.",
-                "I'm operating with limited capabilities right now. Could you ask something simpler?"
+                "I'm running in fallback mode due to model loading issues. I have limited capabilities right now.",
+                "I can't access the language model currently. Please check the Space logs for more information.",
+                "I'm operating with a simplified model. For better performance, try running this app locally with proper models configured."
             ]
         )
 
@@ -92,20 +89,22 @@ def get_embeddings():
         os.makedirs(cache_dir, exist_ok=True)
         os.chmod(cache_dir, 0o777)
     except Exception as e:
-        print(f"Warning: Could not create cache directory: {e}")
+        logger.warning(f"Could not create cache directory: {e}")
         cache_dir = None
 
-    # Try to use local embeddings first (most reliable)
+    # Try to use local embeddings
     try:
+        logger.info(f"Loading embeddings model: {EMBEDDING_MODEL}")
         return HuggingFaceEmbeddings(
             model_name=EMBEDDING_MODEL,
             cache_folder=cache_dir
         )
     except Exception as e:
-        print(f"Error initializing embeddings: {e}")
+        logger.warning(f"Error initializing embeddings: {e}")
 
         # Create mock embeddings that return random vectors for fallback
         from langchain.embeddings.fake import FakeEmbeddings
+        logger.warning("Using mock embeddings as fallback")
         return FakeEmbeddings(size=384)  # Standard size for small embedding models
 
 def get_chat_model():
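
The revised get_llm() now degrades in three steps: explicit AutoModelForCausalLM loading, then a bare pipeline(...) call, then a canned FakeListLLM. A caller can detect that last case and surface a warning in the UI. A sketch, assuming the module is importable as app.core.llm (as the repo layout suggests) and a recent LangChain where LLMs expose .invoke():

# Hypothetical caller-side check, not part of the commit.
from langchain.llms.fake import FakeListLLM

from app.core.llm import get_llm

llm = get_llm()
if isinstance(llm, FakeListLLM):
    # Every real backend failed; responses are canned strings.
    print("Warning: model loading failed, running with the mock fallback LLM.")
else:
    # HuggingFacePipeline supports the standard LLM call interface.
    print(llm.invoke("Answer in one short sentence: what is RAG?"))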
deploy_to_hf.py CHANGED
@@ -6,6 +6,7 @@ This script will help you set environment variables and deploy your app.
 import os
 import sys
 import subprocess
+import time
 from getpass import getpass
 from huggingface_hub import HfApi, SpaceHardware, SpaceStage
 
@@ -15,29 +16,35 @@ def setup_deployment():
     print("Hugging Face Spaces Deployment Setup")
     print("="*50)
 
-    # Get user credentials
-    username = input("Enter your Hugging Face username: ")
-    token = getpass("Enter your Hugging Face token (from https://huggingface.co/settings/tokens): ")
-    space_name = input("Enter your Space name (default: personal-rag-assistant): ") or "personal-rag-assistant"
+    # Check if running in an environment with saved credentials
+    username = os.environ.get("HF_USERNAME")
+    token = os.environ.get("HF_TOKEN")
+    space_name = os.environ.get("SPACE_NAME")
 
-    # Set environment variables
-    os.environ["HF_USERNAME"] = username
-    os.environ["HF_TOKEN"] = token
-    os.environ["SPACE_NAME"] = space_name
+    # If not, ask for credentials
+    if not (username and token and space_name):
+        username = input("Enter your Hugging Face username: ")
+        token = getpass("Enter your Hugging Face token (from https://huggingface.co/settings/tokens): ")
+        space_name = input("Enter your Space name (default: personal-rag-assistant): ") or "personal-rag-assistant"
+
+        # Set environment variables
+        os.environ["HF_USERNAME"] = username
+        os.environ["HF_TOKEN"] = token
+        os.environ["SPACE_NAME"] = space_name
 
     # Write credentials to .env file
     with open(".env", "w") as f:
         f.write(f"HF_API_KEY={token}\n")
         f.write(f"HF_USERNAME={username}\n")
         f.write(f"SPACE_NAME={space_name}\n")
-        f.write("LLM_MODEL=google/flan-t5-large\n")
+        f.write("LLM_MODEL=distilgpt2\n")  # Use smaller model to avoid 403 errors
         f.write("EMBEDDING_MODEL=sentence-transformers/all-MiniLM-L6-v2\n")
         f.write("VECTOR_DB_PATH=./data/vector_db\n")
         f.write("COLLECTION_NAME=personal_assistant\n")
         f.write("DEFAULT_TEMPERATURE=0.7\n")
-        f.write("CHUNK_SIZE=1000\n")
-        f.write("CHUNK_OVERLAP=200\n")
-        f.write("MAX_TOKENS=512\n")
+        f.write("CHUNK_SIZE=512\n")  # Smaller chunk size
+        f.write("CHUNK_OVERLAP=128\n")  # Smaller overlap
+        f.write("MAX_TOKENS=256\n")  # Smaller token limit
 
     # Set up git credential helper for Hugging Face
     try:
@@ -80,6 +87,19 @@ def create_space(username, token, space_name):
         exists = any(space.id == f"{username}/{space_name}" for space in spaces)
         if exists:
             print(f"Space {username}/{space_name} exists.")
+
+            # Check if we need to update space configuration
+            try:
+                print("Updating Space configuration to use Docker...")
+                api.update_space(
+                    repo_id=f"{username}/{space_name}",
+                    private=False,
+                    sdk="docker",
+                    hardware=SpaceHardware.CPU_BASIC
+                )
+                print("Space configuration updated.")
+            except Exception as e:
+                print(f"Note: Could not update space configuration: {e}")
         else:
             print(f"Space {username}/{space_name} does not exist. Creating...")
             # Create the space
@@ -175,7 +195,7 @@ def prepare_git_push(username, space_name):
     # Add and commit files
     subprocess.run(["git", "add", "."], check=True)
     try:
-        subprocess.run(["git", "commit", "-m", "Initial commit for Hugging Face Space deployment"], check=True)
+        subprocess.run(["git", "commit", "-m", "Update for Hugging Face Space deployment"], check=True)
     except subprocess.CalledProcessError:
         # Check if there are changes to commit
         status = subprocess.run(["git", "status", "--porcelain"], capture_output=True, text=True, check=True).stdout.strip()
@@ -203,6 +223,9 @@ def push_to_space(username, token):
         env["GIT_USERNAME"] = username
         env["GIT_PASSWORD"] = token
 
+        # Make sure HUGGINGFACEHUB_API_TOKEN is set in the environment
+        env["HUGGINGFACEHUB_API_TOKEN"] = token
+
         # Determine current branch
         current_branch = subprocess.run(
             ["git", "branch", "--show-current"],
@@ -211,6 +234,8 @@ def push_to_space(username, token):
 
         if not current_branch:
            current_branch = "master"  # Default to master if no branch is returned
+            if not os.path.exists(".git/refs/heads/master"):
+                current_branch = "main"  # Try main as another default
 
         # Push code - force push to override any existing content
         print(f"Pushing from branch {current_branch} to main...")
@@ -219,25 +244,52 @@ def push_to_space(username, token):
         print("\nRunning git push command...")
         print(f"Pushing to Space as user: {username}")
 
-        # Try to push
-        try:
-            subprocess.run(cmd, check=True, env=env)
-        except subprocess.CalledProcessError as e:
-            print(f"Error during push: {e}")
+        # Try different push methods in sequence until one works
+        methods = [
+            # Method 1: Standard remote push
+            lambda: subprocess.run(cmd, check=True, env=env),
 
-            # Try direct URL push as alternative
-            print("\nTrying alternative direct URL push...")
-            direct_url = f"https://{username}:{token}@huggingface.co/spaces/{username}/{os.environ.get('SPACE_NAME')}"
-            alt_cmd = ["git", "push", "-f", direct_url, f"{current_branch}:main"]
+            # Method 2: Direct URL push
+            lambda: subprocess.run(
+                ["git", "push", "-f", f"https://{username}:{token}@huggingface.co/spaces/{username}/{os.environ.get('SPACE_NAME')}", f"{current_branch}:main"],
+                check=True, env=env
+            ),
 
+            # Method 3: Push with credentials explicitly set
+            lambda: subprocess.run(
+                ["git", "push", "-f", "hf", f"{current_branch}:main"],
+                check=True, env={**env, "HUGGINGFACE_TOKEN": token, "HF_TOKEN": token}
+            )
+        ]
+
+        success = False
+        for i, method in enumerate(methods, 1):
            try:
-                subprocess.run(alt_cmd, check=True, env=env)
+                print(f"\nTrying push method {i}...")
+                method()
+                print(f"Push method {i} succeeded!")
+                success = True
+                break
            except subprocess.CalledProcessError as e:
-                print(f"Direct URL push also failed: {e}")
-                raise
+                print(f"Push method {i} failed: {e}")
+                if i < len(methods):
+                    print("Trying next method...")
+                    time.sleep(2)  # Give a small delay before trying the next method
 
-        print("\nCode pushed to Hugging Face Space successfully!")
-    except subprocess.CalledProcessError as e:
+        if success:
+            print("\nCode pushed to Hugging Face Space successfully!")
+        else:
+            raise Exception("All push methods failed")
+
+        # Wait a moment to ensure the Space starts building
+        print("\nWaiting for Space to start building...")
+        time.sleep(5)
+
+        print(f"\nYour Space will be available at: https://huggingface.co/spaces/{username}/{os.environ.get('SPACE_NAME')}")
+        print("It may take a few minutes for the Space to build and start.")
+        return True
+
+    except Exception as e:
         print(f"Error pushing code: {e}")
         print("\nTroubleshooting git push issues:")
         print("1. Ensure your Hugging Face token has write access")
@@ -251,29 +303,44 @@ def push_to_space(username, token):
     return True
 
 def main():
-    """Main function to run the deployment process."""
-    username, token, space_name = setup_deployment()
-
-    # Create the Space
-    if not create_space(username, token, space_name):
-        print("Failed to create Space. Attempting to continue anyway.")
-
-    # Prepare git for pushing
-    if not prepare_git_push(username, space_name):
-        print("Failed to prepare git. Exiting.")
-        return
-
-    # Push code to Space
-    if not push_to_space(username, token):
-        print("Failed to push code. Exiting.")
-        return
-
-    print("\n" + "="*50)
-    print(f"Deployment completed! Your app should be available at:")
-    print(f"https://huggingface.co/spaces/{username}/{space_name}")
+    """Main entry point for the deployment script."""
+    print("Hugging Face Space Deployment Script")
     print("="*50)
-    print("\nNote: It may take a few minutes for the Space to build and deploy your app.")
-    print("You can monitor the build progress on the Space page.")
+    print("This script will help you deploy your app to Hugging Face Spaces.")
+
+    try:
+        # Set up deployment environment
+        username, token, space_name = setup_deployment()
+
+        # Create the Space
+        if not create_space(username, token, space_name):
+            print("Error creating Space. Please check your credentials and try again.")
+            sys.exit(1)
+
+        # Prepare git repository
+        if not prepare_git_push(username, space_name):
+            print("Error preparing git repository. Please check your git configuration and try again.")
+            sys.exit(1)
+
+        # Push to Space
+        if not push_to_space(username, token):
+            print("Error pushing to Space. Please check the logs and try again.")
+            sys.exit(1)
+
+        print("\nDeployment complete!")
+        print(f"Your app is now available at: https://huggingface.co/spaces/{username}/{space_name}")
+        print("\nNote: It may take a few minutes for the Space to build and start.")
+        print("If your app is not showing up properly, check the Space logs in the Hugging Face UI.")
+        print("Common issues:")
+        print("1. Permission errors - check that cache directories have proper permissions")
+        print("2. Model loading errors - try using a smaller model")
+        print("3. Port configuration - ensure app is running on port 7860")
+    except KeyboardInterrupt:
+        print("\nDeployment interrupted by user.")
+        sys.exit(1)
+    except Exception as e:
+        print(f"\nUnexpected error: {e}")
+        sys.exit(1)
 
 if __name__ == "__main__":
     main()
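
The new push_to_space() only sleeps five seconds before printing the URL, so the Space is usually still building when the script exits. A polling helper could extend it; this is a hedged sketch, and the https://{username}-{space_name}.hf.space address (the standard Spaces app subdomain) is an assumption about how this Space is named:

# Hypothetical extension, not part of the commit: wait until the deployed
# app itself answers HTTP 200, rather than just the Space page.
import time
import requests

def wait_for_space(username: str, space_name: str, timeout: int = 600) -> bool:
    url = f"https://{username}-{space_name}.hf.space"
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            if requests.get(url, timeout=10).status_code == 200:
                print(f"Space is up: {url}")
                return True
        except requests.RequestException:
            pass  # still building or restarting; keep polling
        time.sleep(15)
    print("Timed out waiting for the Space to respond.")
    return False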
push_to_hf.py ADDED
@@ -0,0 +1,66 @@
+#!/usr/bin/env python
+"""
+Simple script to push directly to Hugging Face Space.
+This is a streamlined approach when you already have a Space.
+"""
+import os
+import subprocess
+import sys
+from getpass import getpass
+
+def push_to_huggingface():
+    """Push the current directory to Hugging Face Space."""
+    print("=" * 50)
+    print("Simple Hugging Face Push Tool")
+    print("=" * 50)
+
+    # Get credentials
+    username = input("Enter your Hugging Face username: ")
+    token = getpass("Enter your Hugging Face token: ")
+    space_name = input("Enter your Space name: ")
+
+    # Set environment variables
+    os.environ["HUGGINGFACEHUB_API_TOKEN"] = token
+
+    # Add the direct remote URL
+    remote_url = f"https://{username}:{token}@huggingface.co/spaces/{username}/{space_name}"
+
+    try:
+        # Add remote if not exists
+        remotes = subprocess.run(["git", "remote"], capture_output=True, text=True).stdout.strip().split('\n')
+        if "hf" not in remotes:
+            subprocess.run(["git", "remote", "add", "hf", remote_url], check=True)
+        else:
+            subprocess.run(["git", "remote", "set-url", "hf", remote_url], check=True)
+
+        # Stage all files
+        subprocess.run(["git", "add", "."], check=True)
+
+        # Commit changes
+        try:
+            subprocess.run(["git", "commit", "-m", "Fix 403 error by using local models"], check=True)
+        except subprocess.CalledProcessError:
+            # Check if there are changes to commit
+            status = subprocess.run(["git", "status", "--porcelain"], capture_output=True, text=True).stdout.strip()
+            if not status:
+                print("No changes to commit.")
+            else:
+                print("Error making commit. Will try to push existing commits.")
+
+        # Force push to Space
+        print("Pushing to Hugging Face Space...")
+        subprocess.run(["git", "push", "-f", "hf", "HEAD:main"], check=True)
+
+        print("\nSuccess! Your code has been pushed to Hugging Face Space.")
+        print(f"View your Space at: https://huggingface.co/spaces/{username}/{space_name}")
+        print("Note: It may take a few minutes for changes to appear.")
+
+    except subprocess.CalledProcessError as e:
+        print(f"Error: {e}")
+        sys.exit(1)
+    except Exception as e:
+        print(f"Unexpected error: {e}")
+        sys.exit(1)
+
+if __name__ == "__main__":
+    push_to_huggingface()
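
push_to_hf.py is interactive by design. For CI or repeated pushes, the same push can be driven from the environment variables that deploy_to_hf.py already writes to .env; a minimal sketch, assuming HF_USERNAME, HF_TOKEN, and SPACE_NAME are exported:

# Hypothetical non-interactive variant, not part of the commit.
import os
import subprocess

username = os.environ["HF_USERNAME"]
token = os.environ["HF_TOKEN"]
space_name = os.environ["SPACE_NAME"]

# Same remote URL scheme push_to_hf.py uses; the token doubles as the password.
remote_url = f"https://{username}:{token}@huggingface.co/spaces/{username}/{space_name}"
subprocess.run(["git", "push", "-f", remote_url, "HEAD:main"], check=True)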