gemini server function call_gemini via anvil
Browse files- WIP.txt +1 -0
- app.py +12 -0
- requirements.txt +1 -0
- test.ipynb +40 -3
WIP.txt
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
Clean up after backing up, so that only encode_anvil is hosted and an appropriate message is displayed. Note: the update_index script needs to ping this server daily via a GET call to keep it alive — still to be tested.
|
app.py
CHANGED
|
@@ -2,6 +2,9 @@ from flask import Flask,request,render_template,send_file,jsonify
|
|
| 2 |
import os
|
| 3 |
from transformers import AutoTokenizer, AutoModel
|
| 4 |
import anvil.server
|
|
|
|
|
|
|
|
|
|
| 5 |
|
| 6 |
anvil.server.connect('PLMOIU5VCGGUOJH2XORIBWV3-ZXZVFLWX7QFIIAF4')
|
| 7 |
|
|
@@ -12,6 +15,15 @@ MESSAGED={'title':'Script Server',
|
|
| 12 |
tokenizer = AutoTokenizer.from_pretrained('allenai/specter')
|
| 13 |
encoder = AutoModel.from_pretrained('allenai/specter')
|
| 14 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 15 |
@anvil.server.callable
|
| 16 |
def encode_anvil(text):
|
| 17 |
inputs = tokenizer(text, padding=True, truncation=True,
|
|
|
|
| 2 |
import os
|
| 3 |
from transformers import AutoTokenizer, AutoModel
|
| 4 |
import anvil.server
|
| 5 |
+
import pathlib
|
| 6 |
+
import textwrap
|
| 7 |
+
import google.generativeai as genai
|
| 8 |
|
| 9 |
anvil.server.connect('PLMOIU5VCGGUOJH2XORIBWV3-ZXZVFLWX7QFIIAF4')
|
| 10 |
|
|
|
|
| 15 |
tokenizer = AutoTokenizer.from_pretrained('allenai/specter')
|
| 16 |
encoder = AutoModel.from_pretrained('allenai/specter')
|
| 17 |
|
| 18 |
+
# Configure the Gemini client from the environment.
# BUG FIX: the original called os.genenv(), which does not exist and would
# raise AttributeError at import time; the correct function is os.getenv().
# NOTE(review): if GOOGLE_API_KEY is unset this passes None to configure();
# genai will then fail at request time — consider failing fast here instead.
GOOGLE_API_KEY = os.getenv('GOOGLE_API_KEY')
genai.configure(api_key=GOOGLE_API_KEY)
|
| 20 |
+
|
| 21 |
+
@anvil.server.callable
def call_gemini(text):
    """Generate a Gemini completion for *text*, callable from Anvil clients.

    Args:
        text: prompt string forwarded verbatim to the model.

    Returns:
        The generated text (str).

    FIX: the original returned the raw GenerateContentResponse object, which
    Anvil's uplink cannot serialize back to the client; return the portable
    .text payload instead.
    """
    model = genai.GenerativeModel('gemini-pro')
    response = model.generate_content(text)
    return response.text
|
| 26 |
+
|
| 27 |
@anvil.server.callable
|
| 28 |
def encode_anvil(text):
|
| 29 |
inputs = tokenizer(text, padding=True, truncation=True,
|
requirements.txt
CHANGED
|
@@ -4,3 +4,4 @@ flask
|
|
| 4 |
torch
|
| 5 |
numpy
|
| 6 |
transformers
|
|
|
|
|
|
| 4 |
torch
|
| 5 |
numpy
|
| 6 |
transformers
|
| 7 |
+
google-generativeai
|
test.ipynb
CHANGED
|
@@ -30,7 +30,7 @@
|
|
| 30 |
"metadata": {},
|
| 31 |
"outputs": [],
|
| 32 |
"source": [
|
| 33 |
-
"anvil.server.call('encode_anvil','I am a robot')"
|
| 34 |
]
|
| 35 |
},
|
| 36 |
{
|
|
@@ -45,7 +45,26 @@
|
|
| 45 |
" elif server=='hf': url='https://huggingface.co/spaces/gmshroff/gmserver/encode'\n",
|
| 46 |
" body={'text':text}\n",
|
| 47 |
" response=requests.post(url=url,data=json.dumps(body),headers = {'Content-Type': 'application/json'})\n",
|
| 48 |
-
" return
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 49 |
]
|
| 50 |
},
|
| 51 |
{
|
|
@@ -56,12 +75,30 @@
|
|
| 56 |
"source": [
|
| 57 |
"headers = {'Content-Type': 'application/json'}\n",
|
| 58 |
"# url='http://127.0.0.1:5000/run'\n",
|
| 59 |
-
"url='https://huggingface.co/spaces/gmshroff/gmserver/
|
| 60 |
"# url='http://127.0.0.1:7860/run'\n",
|
| 61 |
"# body={\"script\":\"python update_valdata.py\"}\n",
|
| 62 |
"# body={\"script\":\"pwd\"}"
|
| 63 |
]
|
| 64 |
},
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 65 |
{
|
| 66 |
"cell_type": "code",
|
| 67 |
"execution_count": null,
|
|
|
|
| 30 |
"metadata": {},
|
| 31 |
"outputs": [],
|
| 32 |
"source": [
|
| 33 |
+
"anvil.server.call('encode_anvil','I am a robot')[0]"
|
| 34 |
]
|
| 35 |
},
|
| 36 |
{
|
|
|
|
| 45 |
" elif server=='hf': url='https://huggingface.co/spaces/gmshroff/gmserver/encode'\n",
|
| 46 |
" body={'text':text}\n",
|
| 47 |
" response=requests.post(url=url,data=json.dumps(body),headers = {'Content-Type': 'application/json'})\n",
|
| 48 |
+
" return response\n",
|
| 49 |
+
" return json.loads(response.content)['embedding']"
|
| 50 |
+
]
|
| 51 |
+
},
|
| 52 |
+
{
|
| 53 |
+
"cell_type": "code",
|
| 54 |
+
"execution_count": null,
|
| 55 |
+
"metadata": {},
|
| 56 |
+
"outputs": [],
|
| 57 |
+
"source": [
|
| 58 |
+
"response=encode('I am a robot',server='local')"
|
| 59 |
+
]
|
| 60 |
+
},
|
| 61 |
+
{
|
| 62 |
+
"cell_type": "code",
|
| 63 |
+
"execution_count": null,
|
| 64 |
+
"metadata": {},
|
| 65 |
+
"outputs": [],
|
| 66 |
+
"source": [
|
| 67 |
+
"response.content"
|
| 68 |
]
|
| 69 |
},
|
| 70 |
{
|
|
|
|
| 75 |
"source": [
|
| 76 |
"headers = {'Content-Type': 'application/json'}\n",
|
| 77 |
"# url='http://127.0.0.1:5000/run'\n",
|
| 78 |
+
"url='https://huggingface.co/spaces/gmshroff/gmserver/'\n",
|
| 79 |
"# url='http://127.0.0.1:7860/run'\n",
|
| 80 |
"# body={\"script\":\"python update_valdata.py\"}\n",
|
| 81 |
"# body={\"script\":\"pwd\"}"
|
| 82 |
]
|
| 83 |
},
|
| 84 |
+
{
|
| 85 |
+
"cell_type": "code",
|
| 86 |
+
"execution_count": null,
|
| 87 |
+
"metadata": {},
|
| 88 |
+
"outputs": [],
|
| 89 |
+
"source": [
|
| 90 |
+
"response=requests.get(url=url)"
|
| 91 |
+
]
|
| 92 |
+
},
|
| 93 |
+
{
|
| 94 |
+
"cell_type": "code",
|
| 95 |
+
"execution_count": null,
|
| 96 |
+
"metadata": {},
|
| 97 |
+
"outputs": [],
|
| 98 |
+
"source": [
|
| 99 |
+
"response.content"
|
| 100 |
+
]
|
| 101 |
+
},
|
| 102 |
{
|
| 103 |
"cell_type": "code",
|
| 104 |
"execution_count": null,
|