Mustehson committed · d10cca1
Parent(s): 1d603b6
Separate GPU Process
app.py CHANGED
@@ -77,8 +77,16 @@ def get_prompt(schema, query_input):
     """
     return text
 
+@spaces.GPU(duration=60)
+def generate_sql(prompt):
+
+    input_ids = tokenizer(prompt, return_tensors="pt").input_ids
+    input_token_len = input_ids.shape[1]
+    outputs = model.generate(input_ids.to(model.device), max_new_tokens=1024)
+    result = tokenizer.decode(outputs[0][input_token_len:], skip_special_tokens=True)
+    return result
+
 # Generate SQL
-@spaces.GPU(duration=120)
 def text2sql(table, query_input):
     if table is None:
         return {
@@ -94,10 +102,7 @@ def text2sql(table, query_input):
     print(f'Prompt Generated...')
     try:
         print(f'Generating SQL... {model.device}')
-        input_ids = tokenizer(prompt, return_tensors="pt").input_ids
-        input_token_len = input_ids.shape[1]
-        outputs = model.generate(input_ids.to(model.device), max_new_tokens=1024)
-        result = tokenizer.decode(outputs[0][input_token_len:], skip_special_tokens=True)
+        result = generate_sql(prompt)
         print('SQL Generated...')
     except Exception as e:
         return {
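
In effect, the commit narrows the ZeroGPU window: instead of decorating all of text2sql with @spaces.GPU(duration=120), only the new generate_sql helper requests a GPU, and only for 60 seconds. Below is a minimal, self-contained sketch of that pattern; the "gpt2" model name, the simplified text2sql signature, and the error string are assumptions for illustration, not part of this commit.

import spaces
from transformers import AutoModelForCausalLM, AutoTokenizer

# Assumption: "gpt2" stands in for whatever text-to-SQL model app.py actually loads.
tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

@spaces.GPU(duration=60)  # a ZeroGPU device is requested only while this call runs
def generate_sql(prompt):
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids
    input_token_len = input_ids.shape[1]
    outputs = model.generate(input_ids.to(model.device), max_new_tokens=1024)
    # Decode only the newly generated tokens, not the echoed prompt.
    return tokenizer.decode(outputs[0][input_token_len:], skip_special_tokens=True)

def text2sql(prompt):
    # Prompt building, validation, and error handling stay on the CPU side;
    # the GPU window opens only around the generate_sql call.
    try:
        return generate_sql(prompt)
    except Exception as e:
        return f'Error: {e}'

Compared with decorating text2sql directly, this halves the requested duration (120 s to 60 s) and keeps table validation and prompt construction out of the GPU allocation.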