Spaces:
Running
Running
github-actions[bot]
committed on
Commit
·
4e14cb8
1
Parent(s):
88a9a7b
Auto-sync from demo at Thu Sep 11 02:27:07 UTC 2025
Browse files- app.py +2 -2
- graphgen/graphgen.py +0 -1
- webui/app.py +2 -2
app.py
CHANGED
|
@@ -37,10 +37,10 @@ css = """
|
|
| 37 |
def init_graph_gen(config: dict, env: dict) -> GraphGen:
|
| 38 |
# Set up working directory
|
| 39 |
log_file, working_dir = setup_workspace(os.path.join(root_dir, "cache"))
|
| 40 |
-
|
| 41 |
set_logger(log_file, if_stream=True)
|
| 42 |
-
|
| 43 |
|
|
|
|
| 44 |
# Set up LLM clients
|
| 45 |
graph_gen.synthesizer_llm_client = OpenAIModel(
|
| 46 |
model_name=env.get("SYNTHESIZER_MODEL", ""),
|
|
|
|
| 37 |
def init_graph_gen(config: dict, env: dict) -> GraphGen:
|
| 38 |
# Set up working directory
|
| 39 |
log_file, working_dir = setup_workspace(os.path.join(root_dir, "cache"))
|
|
|
|
| 40 |
set_logger(log_file, if_stream=True)
|
| 41 |
+
os.environ.update({k: str(v) for k, v in env.items()})
|
| 42 |
|
| 43 |
+
graph_gen = GraphGen(working_dir=working_dir, config=config)
|
| 44 |
# Set up LLM clients
|
| 45 |
graph_gen.synthesizer_llm_client = OpenAIModel(
|
| 46 |
model_name=env.get("SYNTHESIZER_MODEL", ""),
|
graphgen/graphgen.py
CHANGED
|
@@ -69,7 +69,6 @@ class GraphGen:
|
|
| 69 |
self.tokenizer_instance: Tokenizer = Tokenizer(
|
| 70 |
model_name=self.config["tokenizer"]
|
| 71 |
)
|
| 72 |
-
print(os.getenv("SYNTHESIZER_MODEL"), os.getenv("SYNTHESIZER_API_KEY"))
|
| 73 |
self.synthesizer_llm_client: OpenAIModel = OpenAIModel(
|
| 74 |
model_name=os.getenv("SYNTHESIZER_MODEL"),
|
| 75 |
api_key=os.getenv("SYNTHESIZER_API_KEY"),
|
|
|
|
| 69 |
self.tokenizer_instance: Tokenizer = Tokenizer(
|
| 70 |
model_name=self.config["tokenizer"]
|
| 71 |
)
|
|
|
|
| 72 |
self.synthesizer_llm_client: OpenAIModel = OpenAIModel(
|
| 73 |
model_name=os.getenv("SYNTHESIZER_MODEL"),
|
| 74 |
api_key=os.getenv("SYNTHESIZER_API_KEY"),
|
webui/app.py
CHANGED
|
@@ -37,10 +37,10 @@ css = """
|
|
| 37 |
def init_graph_gen(config: dict, env: dict) -> GraphGen:
|
| 38 |
# Set up working directory
|
| 39 |
log_file, working_dir = setup_workspace(os.path.join(root_dir, "cache"))
|
| 40 |
-
|
| 41 |
set_logger(log_file, if_stream=True)
|
| 42 |
-
|
| 43 |
|
|
|
|
| 44 |
# Set up LLM clients
|
| 45 |
graph_gen.synthesizer_llm_client = OpenAIModel(
|
| 46 |
model_name=env.get("SYNTHESIZER_MODEL", ""),
|
|
|
|
| 37 |
def init_graph_gen(config: dict, env: dict) -> GraphGen:
|
| 38 |
# Set up working directory
|
| 39 |
log_file, working_dir = setup_workspace(os.path.join(root_dir, "cache"))
|
|
|
|
| 40 |
set_logger(log_file, if_stream=True)
|
| 41 |
+
os.environ.update({k: str(v) for k, v in env.items()})
|
| 42 |
|
| 43 |
+
graph_gen = GraphGen(working_dir=working_dir, config=config)
|
| 44 |
# Set up LLM clients
|
| 45 |
graph_gen.synthesizer_llm_client = OpenAIModel(
|
| 46 |
model_name=env.get("SYNTHESIZER_MODEL", ""),
|