NitinBot001 committed on
Commit 04816bd · verified · 1 Parent(s): fb6c9af

Update app.py

Files changed (1): app.py +97 -78
app.py CHANGED
@@ -1,97 +1,116 @@
  import os
- import threading
- from flask import Flask, jsonify, request
- from llama_cpp import Llama
  import requests
+ from flask import Flask, request, jsonify
+ from llama_cpp import Llama
  import subprocess
+ import time
  import json

  app = Flask(__name__)

- # Configuration
- MODEL_PATH = "/tmp/model/calme-3.3-llamaloi-3b.Q4_K_M.gguf"
- GH_PAT = os.getenv("GH_PAT")
+ # Use /tmp directory for storing the model
+ MODEL_DIR = "/tmp/model"
+ MODEL_PATH = os.path.join(MODEL_DIR, "calme-3.3-llamaloi-3b.Q4_K_M.gguf")
+ GH_PAT = os.getenv("GH_PAT")  # GitHub Personal Access Token
  REPO_URL = "https://github.com/NitinBot001/Audio-url-new-js.git"

- def background_init():
-     """Handle time-consuming operations in background"""
-     try:
-         # 1. Download model
-         os.makedirs(os.path.dirname(MODEL_PATH), exist_ok=True)
-         if not os.path.exists(MODEL_PATH):
-             print("Starting model download...")
-             url = "https://huggingface.co/MaziyarPanahi/calme-3.3-llamaloi-3b-GGUF/resolve/main/calme-3.3-llamaloi-3b.Q4_K_M.gguf"
-             with requests.get(url, stream=True) as r:
-                 r.raise_for_status()
-                 with open(MODEL_PATH, "wb") as f:
-                     for chunk in r.iter_content(chunk_size=8192):
-                         f.write(chunk)
-
-         # 2. Initialize LLM
-         global llm
-         llm = Llama(
-             model_path=MODEL_PATH,
-             n_ctx=8192,  # Reduced from 131072 for faster startup
-             n_threads=2,
-             n_gpu_layers=0,
-             verbose=False
+ def download_model():
+     os.makedirs(MODEL_DIR, exist_ok=True)  # Create the /tmp/model directory
+     if not os.path.exists(MODEL_PATH):
+         print("Downloading model...")
+         r = requests.get(
+             "https://huggingface.co/MaziyarPanahi/calme-3.3-llamaloi-3b-GGUF/resolve/main/calme-3.3-llamaloi-3b.Q4_K_M.gguf",
+             stream=True,
          )
-
-         # 3. Tunnel and Git operations
-         tunnel_url = start_tunnel()
-         push_tunnel_url_to_repo(tunnel_url)
-
-     except Exception as e:
-         print(f"Background init failed: {str(e)}")
+         with open(MODEL_PATH, "wb") as f:
+             for chunk in r.iter_content(chunk_size=8192):
+                 f.write(chunk)

  def start_tunnel():
-     """Start tunnel and return URL"""
-     proc = subprocess.Popen(
-         ["npx", "nport", "-s", "hf-space", "-p", "7860"],
+     # Start nport tunnel
+     tunnel_process = subprocess.Popen(
+         ["npx", "nport", "-s", "ai-service", "-p", "7860"],  # Use port 7860
          stdout=subprocess.PIPE,
-         stderr=subprocess.PIPE
+         stderr=subprocess.PIPE,
      )
-     # Wait for tunnel URL
-     for line in iter(proc.stdout.readline, b''):
-         if b"your domain is:" in line:
-             return line.decode().split("your domain is: ")[1].strip()
-     raise RuntimeError("Failed to get tunnel URL")
-
- def push_tunnel_url_to_repo(url):
-     """Update repository with tunnel URL"""
+     time.sleep(10)  # Wait for tunnel to establish
+
+     # Extract tunnel URL from logs
+     tunnel_url = None
+     for line in iter(tunnel_process.stdout.readline, b""):
+         line = line.decode("utf-8").strip()
+         if "your domain is:" in line:
+             tunnel_url = line.split("your domain is: ")[1]
+             break
+
+     if not tunnel_url:
+         raise Exception("Failed to extract tunnel URL")
+
+     return tunnel_url
+
+ def push_tunnel_url_to_repo(tunnel_url):
+     # Create instance.json
+     instance_data = {"tunnel_url": tunnel_url}
+     with open("/tmp/instance.json", "w") as f:
+         json.dump(instance_data, f)
+
+     # Clone the repository
      repo_dir = "/tmp/repo"
-     subprocess.run(["rm", "-rf", repo_dir], check=True)
-     subprocess.run([
-         "git", "clone",
-         f"https://x-access-token:{GH_PAT}@github.com/NitinBot001/Audio-url-new-js.git",
-         repo_dir
-     ], check=True)
-
-     with open(f"{repo_dir}/instance.json", "w") as f:
-         json.dump({"tunnel_url": url}, f)
-
-     subprocess.run(["git", "-C", repo_dir, "add", "."], check=True)
-     subprocess.run([
-         "git", "-C", repo_dir,
-         "commit", "-m", f"Update tunnel URL: {url}"
-     ], check=True)
-     subprocess.run(["git", "-C", repo_dir, "push"], check=True)
-
- @app.route("/chat", methods=["GET"])
- def chat():
-     if 'llm' not in globals():
-         return jsonify({"error": "Initializing, try again later"}), 503
-     message = request.args.get("message", "")
-     prompt = f"<|begin_of_text|><|start_header_id|>user<|end_header_id|>\n{message}<|eot_id|>\n<|start_header_id|>assistant<|end_header_id|>\n"
-     output = llm(prompt, max_tokens=512, stop=["<|eot_id|>"])
-     return jsonify({"response": output['choices'][0]['text'].strip()})
-
- @app.route("/health")
- def health_check():
-     return "OK", 200
+     repo_url = f"https://x-access-token:{GH_PAT}@github.com/NitinBot001/Audio-url-new-js.git"
+     subprocess.run(
+         ["git", "clone", repo_url, repo_dir],
+         check=True,
+     )
+     os.chdir(repo_dir)
+
+     # Move instance.json to the repository
+     subprocess.run(["mv", "/tmp/instance.json", "."], check=True)
+
+     # Configure Git locally (without --global)
+     subprocess.run(["git", "config", "user.email", "[email protected]"], check=True)
+     subprocess.run(["git", "config", "user.name", "github-actions"], check=True)
+
+     # Commit and push changes
+     subprocess.run(["git", "add", "instance.json"], check=True)
+     subprocess.run(["git", "commit", "-m", f"Update tunnel URL to {tunnel_url}"], check=True)
+     subprocess.run(["git", "push", "origin", "main"], check=True)
+
+ @app.route("/chat", methods=["POST"])
+ def chat():
+     data = request.json
+     # Construct the prompt without duplicate special tokens
+     prompt = (
+         f"<|begin_of_text|>"
+         f"<|start_header_id|>user<|end_header_id|>\n"
+         f"{data.get('message', '')}"
+         f"<|eot_id|>\n"
+         f"<|start_header_id|>assistant<|end_header_id|>\n"
+     )
+     output = llm(
+         prompt,
+         max_tokens=2048,
+         stop=["<|eot_id|>"],
+         temperature=0.8,
+         top_p=0.9,
+     )
+     return jsonify({"response": output["choices"][0]["text"].strip()})

  if __name__ == "__main__":
-     # Start background initialization
-     threading.Thread(target=background_init, daemon=True).start()
-     # Start Flask server
+     # Download the model
+     download_model()
+
+     # Initialize the LLM
+     llm = Llama(
+         model_path=MODEL_PATH,
+         n_ctx=131072,  # Set to match the training context length
+         n_threads=2,
+         n_gpu_layers=0,
+         verbose=False,
+     )
+
+     # Start the tunnel and push the URL
+     tunnel_url = start_tunnel()
+     push_tunnel_url_to_repo(tunnel_url)
+
+     # Run the Flask app (for development only)
      app.run(host="0.0.0.0", port=7860)
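
For anyone calling this Space after the change: the /chat endpoint now expects a POST request with a JSON body rather than a GET query parameter. Below is a minimal client sketch; the base URL is a placeholder, since in practice the reachable address is the nport tunnel URL that this code publishes to instance.json in the Audio-url-new-js repository.

import requests

# Placeholder base URL: substitute the "tunnel_url" value from instance.json.
BASE_URL = "http://localhost:7860"

# The updated /chat route reads the prompt from the "message" field of the
# JSON body and returns the model's reply under the "response" key.
resp = requests.post(f"{BASE_URL}/chat", json={"message": "Hello, who are you?"})
resp.raise_for_status()
print(resp.json()["response"])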