Let Graph RAG demo use OpenAI instead of local models.
Dockerfile
CHANGED
@@ -13,7 +13,8 @@ RUN --mount=type=secret,id=LYNXSCRIBE_DEPLOY_KEY,mode=0444,required=true \
     -e lynxkite-graph-analytics \
     -e lynxkite-bio \
     -e lynxkite-lynxscribe \
-    -e lynxkite-pillow-example
+    -e lynxkite-pillow-example \
+    chromadb openai
 WORKDIR $HOME/app/examples
 ENV PORT=7860
 CMD ["uv", "run", "lynxkite"]
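The Dockerfile change adds two packages, chromadb and openai, to the same `uv pip install` step as the editable LynxKite packages. The sketch below is a hypothetical illustration (not the demo's code) of why both are needed for Graph RAG: OpenAI produces the embeddings and ChromaDB stores and queries them. The collection name, texts, and embedding model are made up, and `OPENAI_API_KEY` must be available (e.g. as a Space secret) because the client is built with no arguments.

# Hypothetical illustration, not the demo's code: OpenAI supplies embeddings,
# ChromaDB stores and queries them. Names and model choice are assumptions.
import chromadb
import openai

client = openai.OpenAI()                       # reads OPENAI_API_KEY from the environment
chroma = chromadb.Client()                     # in-memory Chroma instance
collection = chroma.create_collection("docs")  # hypothetical collection name

texts = ["LynxKite is a graph analytics tool.", "Graph RAG retrieves graph context."]
vectors = [
    d.embedding
    for d in client.embeddings.create(model="text-embedding-3-small", input=texts).data
]
collection.add(ids=["0", "1"], documents=texts, embeddings=vectors)

query = client.embeddings.create(
    model="text-embedding-3-small", input=["What does LynxKite do?"]
).data[0].embedding
print(collection.query(query_embeddings=[query], n_results=1)["documents"])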
lynxkite-lynxscribe/src/lynxkite_lynxscribe/llm_ops.py
CHANGED
@@ -25,7 +25,7 @@ op = ops.op_registration(ENV)
 def chat(*args, **kwargs):
     import openai

-    chat_client = openai.OpenAI(
+    chat_client = openai.OpenAI()
     key = json.dumps({"method": "chat", "args": args, "kwargs": kwargs})
     if key not in LLM_CACHE:
         completion = chat_client.chat.completions.create(*args, **kwargs)
@@ -36,7 +36,7 @@ def chat(*args, **kwargs):
 def embedding(*args, **kwargs):
     import openai

-    embedding_client = openai.OpenAI(
+    embedding_client = openai.OpenAI()
     key = json.dumps({"method": "embedding", "args": args, "kwargs": kwargs})
     if key not in LLM_CACHE:
         res = embedding_client.embeddings.create(*args, **kwargs)
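In both functions the only change is that the client is now constructed with no arguments, so the openai library resolves `OPENAI_API_KEY` (and, if set, `OPENAI_BASE_URL`) from the environment instead of the previous explicit constructor arguments, which presumably pointed at a local model server. Below is a minimal sketch of the memoized-call pattern these functions wrap around the default client; storing the whole completion in `LLM_CACHE` and the example model/messages are assumptions, since the lines that write into the cache fall outside the hunks shown.

# Minimal sketch of the call-level memoization around the zero-argument client.
# What exactly gets stored in LLM_CACHE is an assumption; those lines are
# outside the hunks above.
import json

LLM_CACHE = {}


def chat(*args, **kwargs):
    import openai

    # Default constructor: credentials and endpoint come from OPENAI_API_KEY
    # and (optionally) OPENAI_BASE_URL in the environment.
    chat_client = openai.OpenAI()
    key = json.dumps({"method": "chat", "args": args, "kwargs": kwargs})
    if key not in LLM_CACHE:
        LLM_CACHE[key] = chat_client.chat.completions.create(*args, **kwargs)
    return LLM_CACHE[key]


# Illustrative call; the model and messages are not from the demo.
reply = chat(model="gpt-4o-mini", messages=[{"role": "user", "content": "Hello!"}])
print(reply.choices[0].message.content)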