Update app.py
app.py CHANGED
@@ -1,5 +1,5 @@
 import gradio as gr
-from transformers import AutoTokenizer, AutoModelForCausalLM
+from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
 from playwright.sync_api import sync_playwright
 from flax import linen as nn
 from jax import random
@@ -28,34 +28,30 @@ class ActionModel(nn.Module):
         logits = self.dense(output)
         return logits, new_state
 
-# Initialize Flax model
-vocab_size = 50257
+# Initialize Flax model and get its initial state
+vocab_size = 50257  # Adjust this if needed for Zephyr-7b-beta
 hidden_size = 1024
 num_layers = 2
 key = random.PRNGKey(0)
 model = ActionModel(vocab_size, hidden_size, num_layers)
 init_state = model.lstm.initialize_carry(key, (1, hidden_size))
 
-# Function to generate actions using
+# Function to generate actions using Zephyr-7b-beta model
 def generate_actions(input_text, browser, page):
-    # Load
+    # Load Zephyr-7b-beta model
     tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
     model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
 
-    # Prepare input for
+    # Prepare input for Zephyr-7b-beta
     inputs = tokenizer(input_text, return_tensors="pt")
     inputs = inputs.to(model.device)
 
-    # Generate response
-
-
-        max_length=MAX_LENGTH,
-        num_beams=NUM_BEAMS,
-        temperature=0.7,
-    )
+    # Generate response (use pipeline for Zephyr-7b-beta)
+    generator = pipeline('text-generation', model=model, tokenizer=tokenizer)
+    outputs = generator(input_text, max_length=MAX_LENGTH, num_beams=NUM_BEAMS, temperature=0.7)
 
     # Decode response and extract actions
-    response =
+    response = outputs[0]['generated_text']
    actions = response.split("\n")
 
     # Perform actions
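
For context, a minimal, hypothetical sketch of how the updated generate_actions might be driven end to end. MODEL_NAME, MAX_LENGTH, and NUM_BEAMS are referenced in the diff but defined elsewhere in app.py, so the values below (and the Zephyr checkpoint id) are assumptions for illustration only:

# Hypothetical usage sketch; the constant values are assumptions, since
# their definitions sit outside this diff.
from playwright.sync_api import sync_playwright

MODEL_NAME = "HuggingFaceH4/zephyr-7b-beta"  # assumed from the Zephyr-7b-beta comments
MAX_LENGTH = 256  # assumed
NUM_BEAMS = 1     # assumed

with sync_playwright() as p:
    browser = p.chromium.launch(headless=True)
    page = browser.new_page()
    # generate_actions loads the model, generates a newline-separated list of
    # actions from the prompt, and performs them against the page
    generate_actions("open https://example.com and click 'Log in'", browser, page)
    browser.close()

One caveat on the generation call itself: in transformers, temperature only takes effect when do_sample=True is passed, so with beam search alone the temperature=0.7 argument is effectively ignored.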