File size: 1,480 Bytes
c0ba1b5 a3a3430 c0ba1b5 a3a3430 c0ba1b5 a3a3430 c0ba1b5 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 |
import os, gradio as gr
from openai import OpenAI
# Initialize the DeepSeek client through the OpenAI-compatible SDK.
# The key is read from the DEEPSEEK_API_KEY environment variable;
# os.getenv returns None when unset, so a missing key only fails at
# request time, not at import time.
client = OpenAI(
    api_key=os.getenv("DEEPSEEK_API_KEY"),
    base_url="https://api.deepseek.com/v1"
)
def analyze_stream(temperature, light, soil_humidity):
    """Stream a DeepSeek analysis of three plant-environment readings.

    Sends the readings to the ``deepseek-chat`` model with streaming
    enabled and yields the accumulated response text as tokens arrive,
    so the frontend can render the answer incrementally.

    Args:
        temperature: Ambient temperature in °C.
        light: Light intensity in lux.
        soil_humidity: Soil humidity as a percentage.

    Yields:
        str: The full response text received so far.
    """
    messages = [
        {"role": "system", "content": "You are a plant care assistant."},
        {"role": "user", "content":
            f"Temperature: {temperature}°C, Light: {light} lux, Soil Humidity: {soil_humidity}%."}
    ]
    stream = client.chat.completions.create(
        model="deepseek-chat",
        messages=messages,
        temperature=0.7,
        stream=True
    )
    partial = ""
    for chunk in stream:
        # Some stream chunks (e.g. a final usage/metadata chunk) carry an
        # empty `choices` list; skip them instead of raising IndexError.
        if not chunk.choices:
            continue
        # `delta.content` may be None on role/terminator deltas.
        delta = chunk.choices[0].delta.content or ""
        partial += delta
        yield partial  # keep emitting the growing text for live display
# Build the Gradio UI: three numeric environment inputs feed the
# streaming analyzer, whose growing answer is shown in one textbox.
_input_fields = [
    gr.Number(label="Temperature (°C)"),
    gr.Number(label="Light (lux)"),
    gr.Number(label="Soil Humidity (%)"),
]
_output_field = gr.Textbox(label="DeepSeek 分析结果")

demo = gr.Interface(
    title="🌱 实时植物环境分析",
    description="输入当前环境参数后,系统将流式返回 DeepSeek 的分析建议。",
    fn=analyze_stream,
    inputs=_input_fields,
    outputs=_output_field,
)
# Enable Gradio's request queue (required for generator-based streaming
# output) and listen on all interfaces so the app is reachable from
# outside the host.
# NOTE(review): `concurrency_count` was removed in Gradio 4.x in favor of
# `default_concurrency_limit` — confirm against the installed version.
demo.queue(concurrency_count=2).launch(server_name="0.0.0.0")
|