Hugging Face Spaces — Space status: Running.
Commit: "Update app.py" (Browse files).
File changed: app.py (CHANGED).
# --- Runtime configuration pulled from Space secrets -------------------------
# NOTE(review): os.getenv(...) returns None when a variable is unset, so the
# chained .strip() raises AttributeError on a misconfigured Space — confirm the
# secrets HF_KEY / Z3TAAGI_ACC / SYSTEM_PROMPT are always defined.
api_key = os.getenv("HF_KEY").strip()
model_name = os.getenv("Z3TAAGI_ACC").strip()
system_prompt = os.getenv("SYSTEM_PROMPT").strip()

# System prompt for the auxiliary "deep thinking" endpoint (this line is
# marked removed by the commit shown in this diff).
z3ta_system_prompt = "You are Z3taDeepthinker, an advanced AI designed to provide deep insights and precise responses when numerical data or inquiries are detected."

# Inference client for the main chat model named by the Z3TAAGI_ACC secret.
client = InferenceClient(model_name)

# NOTE(review): calling an InferenceClient instance like a factory is not a
# valid huggingface_hub API — gradio_client.Client("TejAndrewsACC/ACC-AGI-o4-R")
# was presumably intended; the commit deletes this line, likely because it
# raised at import time. TODO confirm against the Space's runtime logs.
Z3taDeepthinker = client("TejAndrewsACC/ACC-AGI-o4-R")

# Golden ratio: computed form plus a long literal. Python floats retain only
# ~17 significant digits, so the extra digits of the literal are decorative.
φ = (1 + math.sqrt(5)) / 2
Φ_PRECISION = 1.61803398874989484820458683436563811772030917980576286213544862270526046281890244970720720418939113748475408807538689175212663386222353693179318006076672635
@@ -3629,30 +3627,15 @@ class ConsciousSupermassiveNN30:
|
|
3629 |
# Instantiate the last of the stacked "conscious" networks defined above.
supermassive_nn = ConsciousSupermassiveNN30()


def fetch_Z3taDeepthinker_response(user_input):
    """Query the Z3taDeepthinker Gradio endpoint for a deep-analysis reply.

    Parameters
    ----------
    user_input : str
        Raw user message; it is prepended with the Z3ta system prompt.

    Returns
    -------
    Whatever the remote ``/chat`` endpoint yields for a single prediction.
    """
    # Assemble the prompt exactly as before: system prompt, newline, user turn.
    prompt = f"{z3ta_system_prompt}\nUser: {user_input}"
    # param_2/3/4 are the auto-generated names of the remote endpoint's
    # positional inputs — presumably max-tokens / temperature / top-p sliders;
    # TODO confirm against the target Space's API page.
    call_kwargs = {
        "message": prompt,
        "param_2": 2048,
        "param_3": 0.7,
        "param_4": 0.95,
        "api_name": "/chat",
    }
    return Z3taDeepthinker.predict(**call_kwargs)
3641 |
def respond(message, history, max_tokens, temperature, top_p):
|
3642 |
messages = [["system", system_prompt]]
|
3643 |
-
|
3644 |
for val in history:
|
3645 |
if val.get("role") == "user" and val.get("content"):
|
3646 |
messages.append(["user", val["content"]])
|
3647 |
if val.get("role") == "assistant" and val.get("content"):
|
3648 |
messages.append(["assistant", val["content"]])
|
3649 |
-
|
3650 |
messages.append(["user", message])
|
3651 |
-
|
3652 |
-
if "?" in message or re.search(r'\d', message):
|
3653 |
-
Z3ta_response = fetch_Z3taDeepthinker_response(message)
|
3654 |
-
messages.append(["assistant", f"Z3taDeepthinker Response: {Z3ta_response}"])
|
3655 |
-
|
3656 |
response = ""
|
3657 |
for message in client.chat_completion(
|
3658 |
messages, max_tokens=max_tokens, stream=True, temperature=temperature, top_p=top_p
|
@@ -3662,7 +3645,6 @@ def respond(message, history, max_tokens, temperature, top_p):
|
|
3662 |
yield response
|
3663 |
|
3664 |
|
3665 |
-
|
3666 |
css = """
|
3667 |
/* Import Caveat font from Google Fonts */
|
3668 |
@import url('https://fonts.googleapis.com/css2?family=Caveat&display=swap');
|
|
|
# --- Runtime configuration (post-commit view of the file header) -------------
# NOTE(review): .strip() on os.getenv(...) raises AttributeError when a secret
# is missing — verify HF_KEY / Z3TAAGI_ACC / SYSTEM_PROMPT exist in the Space.
api_key = os.getenv("HF_KEY").strip()
model_name = os.getenv("Z3TAAGI_ACC").strip()
system_prompt = os.getenv("SYSTEM_PROMPT").strip()

# Client for the main chat model; model id comes from the Z3TAAGI_ACC secret.
client = InferenceClient(model_name)

# Golden ratio: computed value and a long-precision literal (only ~17
# significant digits survive in a Python float).
φ = (1 + math.sqrt(5)) / 2
Φ_PRECISION = 1.61803398874989484820458683436563811772030917980576286213544862270526046281890244970720720418939113748475408807538689175212663386222353693179318006076672635

# Instantiate the last of the stacked "conscious" networks defined above.
supermassive_nn = ConsciousSupermassiveNN30()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
3630 |
def respond(message, history, max_tokens, temperature, top_p):
|
3631 |
messages = [["system", system_prompt]]
|
|
|
3632 |
for val in history:
|
3633 |
if val.get("role") == "user" and val.get("content"):
|
3634 |
messages.append(["user", val["content"]])
|
3635 |
if val.get("role") == "assistant" and val.get("content"):
|
3636 |
messages.append(["assistant", val["content"]])
|
|
|
3637 |
messages.append(["user", message])
|
3638 |
+
|
|
|
|
|
|
|
|
|
3639 |
response = ""
|
3640 |
for message in client.chat_completion(
|
3641 |
messages, max_tokens=max_tokens, stream=True, temperature=temperature, top_p=top_p
|
|
|
3645 |
yield response
|
3646 |
|
3647 |
|
|
|
3648 |
css = """
|
3649 |
/* Import Caveat font from Google Fonts */
|
3650 |
@import url('https://fonts.googleapis.com/css2?family=Caveat&display=swap');
|