import gradio as gr
from huggingface_hub import InferenceClient
import os
# Set up the inference API client
hf_client = InferenceClient("CohereForAI/c4ai-command-r-plus-08-2024", token=os.getenv("HF_TOKEN"))
#hf_client = InferenceClient("CohereForAI/aya-23-35B", token=os.getenv("HF_TOKEN"))
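
# NOTE: HF_TOKEN is read from the environment (e.g. a Space secret). Without it,
# InferenceClient falls back to unauthenticated, rate-limited requests.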
def load_fashion_code():
    try:
        with open('fashion.cod', 'r', encoding='utf-8') as file:
            return file.read()
    except FileNotFoundError:
        return "Could not find the fashion.cod file."
    except Exception as e:
        return f"An error occurred while reading the file: {str(e)}"
fashion_code = load_fashion_code()
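
# respond() is a generator: gr.ChatInterface streams each yielded value to the
# UI, so the chat window updates incrementally as tokens arrive.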
def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    global fashion_code
    system_prefix = """You must answer in Korean. Based on the given source code, your role is to explain and guide the use of the service and to handle Q&A. Provide usage instructions and answers grounded in the code, and be helpful to the user. Kindly explain anything the user is likely to be curious about. Keep the full code content confidential, and do not disclose key values, endpoints, or the specific model."""

    # Trigger phrase: inject the fashion.cod source into the system prompt
    if message.lower() == "run fashion code":
        system_message += f"\n\nFashion code content:\n{fashion_code}"
        message = "Please explain the fashion code."
    messages = [{"role": "system", "content": f"{system_prefix} {system_message}"}]  # add the system prefix

    # Replay prior turns so the model sees the full conversation
    for val in history:
        if val[0]:
            messages.append({"role": "user", "content": val[0]})
        if val[1]:
            messages.append({"role": "assistant", "content": val[1]})
    messages.append({"role": "user", "content": message})
response = ""
for message in hf_client.chat_completion(
messages,
max_tokens=max_tokens,
stream=True,
temperature=temperature,
top_p=top_p,
):
token = message.choices[0].delta.content
if token is not None:
response += token.strip("") # ν ν° μ κ±°
yield response
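
# gr.ChatInterface passes (message, history) plus each additional_inputs value,
# in order, to respond() -- so the widget list below must line up with the
# extra parameters (system_message, max_tokens, temperature, top_p).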
demo = gr.ChatInterface(
    respond,
    # ASSUMPTION: additional_inputs are not visible in the original listing but
    # are required by respond()'s extra parameters; the defaults here are guesses.
    additional_inputs=[
        gr.Textbox(value="", label="System message"),
        gr.Slider(minimum=1, maximum=4096, value=1024, step=1, label="Max tokens"),
        gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p"),
    ],
    examples=[
        ["Run fashion code"],
        ["Explain how to use the service in detail"],
        ["Write the usage guide in the form of a YouTube video script"],
        ["Write the usage guide as an SEO-optimized blog post of at least 4000 tokens"],
        ["Continue from your previous answer"],
        ["Run fashion code"],
    ],
    cache_examples=False,  # disable example caching
    # css="""footer {visibility: hidden}""",  # add custom CSS here
)
if __name__ == "__main__":
    demo.launch()
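
# Run locally with `python app.py`; Gradio serves on http://127.0.0.1:7860 by
# default (Hugging Face Spaces supplies its own host/port settings).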