Spaces:
Running
Running
Commit
·
6181597
0
Parent(s):
First commit with Gradio
Browse files- .gradio/flagged/dataset1.csv +2 -0
- README.md +36 -0
- app.py +31 -0
- requirements.txt +3 -0
.gradio/flagged/dataset1.csv
ADDED
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
1 |
+
What shall Leonardo code today?,Code Style,🧾 Leonardo's Work,timestamp
|
2 |
+
a cli tool to list files,Clean & Pythonic,,2025-04-18 15:39:18.850751
|
README.md
ADDED
@@ -0,0 +1,36 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
---
|
2 |
+
title: Leonardo da Code-Vinci
|
3 |
+
emoji: 🧠
|
4 |
+
colorFrom: indigo
|
5 |
+
colorTo: pink
|
6 |
+
sdk: gradio
|
7 |
+
sdk_version: 4.25.0
|
8 |
+
app_file: app.py
|
9 |
+
pinned: false
|
10 |
+
license: mit
|
11 |
+
tags:
|
12 |
+
- gradio
|
13 |
+
- code
|
14 |
+
- llm
|
15 |
+
- deepseek
|
16 |
+
- demo
|
17 |
+
- huggingface
|
18 |
+
---
|
19 |
+
|
20 |
+
# 📜 Codice Da Vinci
|
21 |
+
|
22 |
+
> *“Simplicity is the ultimate sophistication... unless you're writing Python one-liners.”*
|
23 |
+
|
24 |
+
Welcome to **Leonardo da Code-Vinci**, your Renaissance-inspired large language model coding assistant.
|
25 |
+
Whether you're building bridges, drawing algorithms, or debugging with divine flair — Leo's got your back.
|
26 |
+
|
27 |
+
🛠️ Powered by `DeepSeek-Coder-V2-Lite-Instruct`
|
28 |
+
🎨 Built with Gradio
|
29 |
+
🤗 Hosted on Hugging Face Spaces
|
30 |
+
|
31 |
+
Try asking:
|
32 |
+
- "Write a Python function to calculate Fibonacci numbers"
|
33 |
+
- "Explain recursion like you're Leonardo da Vinci"
|
34 |
+
- "Generate a JavaScript function to toggle dark mode"
|
35 |
+
|
36 |
+
---
|
app.py
ADDED
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import gradio as gr
|
2 |
+
from transformers import AutoModelForCausalLM, AutoTokenizer
|
3 |
+
import torch
|
4 |
+
|
5 |
+
# Load the DeepSeek coder tokenizer and model once at module import, so every
# Gradio request reuses the same weights instead of reloading them.
# device_map="auto" lets accelerate place weights on whatever hardware exists;
# float16 halves the memory footprint versus fp32.
# NOTE(review): trust_remote_code=True executes modeling code shipped in the
# hub repo — acceptable for the official deepseek-ai repo, but verify the
# source before pointing this at another checkpoint.
tokenizer = AutoTokenizer.from_pretrained("deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct")  # Or your own!
model = AutoModelForCausalLM.from_pretrained("deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct", device_map="auto", torch_dtype=torch.float16, trust_remote_code=True)
|
7 |
+
|
8 |
+
def generate_code(prompt, style="Clean & Pythonic"):
    """Generate code for *prompt* with the DeepSeek model.

    Parameters
    ----------
    prompt : str
        The user's coding request from the textbox.
    style : str, optional
        One of the UI's two dropdown choices. The manuscript style simply
        prepends a flowery instruction to the prompt; any other value
        leaves the prompt untouched.

    Returns
    -------
    str
        Only the newly generated text, with special tokens removed.
    """
    if style == "Verbose like a 15th-century manuscript":
        prompt = "In a manner most detailed, write code that... " + prompt
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    # Inference only — no need for autograd bookkeeping.
    # FIX: removed use_cache=False. Disabling the KV cache forces the model
    # to re-attend over the entire sequence at every decoding step, making
    # generation dramatically slower with no change in output distribution.
    with torch.no_grad():
        outputs = model.generate(**inputs,
                                 max_new_tokens=256,
                                 do_sample=True,
                                 temperature=1.0,
                                 top_p=0.95)
    # FIX: decode only the tokens generated after the prompt. model.generate
    # returns prompt + continuation, so decoding outputs[0] wholesale echoed
    # the user's request into the "Leonardo's Work" panel before the answer.
    prompt_len = inputs["input_ids"].shape[1]
    return tokenizer.decode(outputs[0][prompt_len:], skip_special_tokens=True)
|
19 |
+
|
20 |
+
# Wire up the Gradio UI: a prompt textbox plus a style selector feed
# generate_code, and the model's answer is rendered in a code viewer.
prompt_box = gr.Textbox(label="How shall Codice Da Vinci help today?", lines=3)
style_picker = gr.Dropdown(
    ["Clean & Pythonic", "Verbose like a 15th-century manuscript"],
    label="Code Style",
)
work_panel = gr.Code(label="🧾 Leonardo's Work")

demo = gr.Interface(
    fn=generate_code,
    inputs=[prompt_box, style_picker],
    outputs=work_panel,
    title="Codice Da Vinci 🧠💻",
    description="Your Renaissance coding assistant. Fluent in algorithms and Latin. Powered by LLM.",
)

demo.launch()
|
requirements.txt
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
gradio
|
2 |
+
transformers
|
3 |
+
torch
|