# self_update_test.py
import os
import requests
import time


def test_self_update():
    print("Testing self-update capability...")

    # Original response handler content
    original_content = """def generate_response(prompt, tokenizer, model):
    inputs = tokenizer(prompt, return_tensors="pt")
    outputs = model.generate(**inputs, max_length=200)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)"""

    # Updated version
    updated_content = """def generate_response(prompt, tokenizer, model):
    # Improved version with better parameters
    inputs = tokenizer(prompt, return_tensors="pt")
    outputs = model.generate(
        **inputs,
        max_length=300,
        temperature=0.7,
        top_p=0.9,
        repetition_penalty=1.1
    )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)"""

    # Save original version
    with open("response_handler.py", "w") as f:
        f.write(original_content)

    # Start the app (in a real test, this would be a separate process)
    print("Starting with original version...")

    # Simulate update process
    print("Triggering update...")
    response = requests.post("http://localhost:7860/api/predict", json={"data": ["/update"]})
    print(f"Update result: {response.json()['data']}")

    # Verify update
    time.sleep(2)
    with open("response_handler.py", "r") as f:
        current_content = f.read()

    if updated_content in current_content:
        print("✅ Self-update successful!")
    else:
        print("❌ Self-update failed")

    # Restore original
    with open("response_handler.py", "w") as f:
        f.write(original_content)


if __name__ == "__main__":
    test_self_update()
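
The test assumes a companion Gradio app is already serving on port 7860 and that posting "/update" to its /api/predict endpoint makes the app rewrite response_handler.py on disk. That app is not part of this file; the sketch below shows one possible shape for it, assuming a Gradio 3.x-style Space (where /api/predict accepts {"data": [...]}). The respond function, the UPDATED_HANDLER constant, and the hot-reload via importlib are illustrative assumptions, not the Space's actual code; the constant must match the test's updated_content for the verification step to pass.

# app_sketch.py -- a minimal sketch of the Gradio app side this test assumes;
# handler names and the update trigger are illustrative, not the Space's actual code.
import importlib

import gradio as gr

import response_handler  # must exist on disk before the app starts

# New handler source written on "/update"; kept identical to the test's
# updated_content string so the verification step can find it.
UPDATED_HANDLER = '''def generate_response(prompt, tokenizer, model):
    # Improved version with better parameters
    inputs = tokenizer(prompt, return_tensors="pt")
    outputs = model.generate(
        **inputs,
        max_length=300,
        temperature=0.7,
        top_p=0.9,
        repetition_penalty=1.1
    )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)'''


def respond(message):
    if message.strip() == "/update":
        # Overwrite the handler module on disk, then hot-reload it.
        with open("response_handler.py", "w") as f:
            f.write(UPDATED_HANDLER)
        importlib.reload(response_handler)
        return "response_handler.py updated and reloaded"
    # Normal requests would call response_handler.generate_response(...) here.
    return f"Echo: {message}"


demo = gr.Interface(fn=respond, inputs="text", outputs="text")

if __name__ == "__main__":
    # Gradio 3.x exposes /api/predict, which the test posts to.
    demo.launch(server_name="0.0.0.0", server_port=7860)

With such an app running (python app_sketch.py), the test can then be driven from another shell with python self_update_test.py.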