File size: 4,752 Bytes
3de0b2c
 
 
cba7e97
 
3de0b2c
 
7bee500
3de0b2c
 
 
1041d8d
3de0b2c
 
 
 
 
 
b38ffc6
3de0b2c
 
 
 
1041d8d
7bee500
3de0b2c
 
 
 
 
 
 
 
 
 
 
 
 
 
b38ffc6
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3de0b2c
 
cba7e97
6d71768
cba7e97
 
 
 
 
 
7bee500
cba7e97
de5e7df
3de0b2c
de5e7df
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
cba7e97
3de0b2c
1041d8d
cba7e97
 
 
 
1041d8d
cba7e97
 
b38ffc6
 
 
 
 
 
 
 
 
3de0b2c
7bee500
3de0b2c
cba7e97
3de0b2c
 
b38ffc6
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3de0b2c
cba7e97
3de0b2c
b38ffc6
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
import gradio as gr
import os
from threading import Thread
from llamafactory.chat import ChatModel
from llamafactory.extras.misc import torch_gc

# Hugging Face access token, read from the environment (None if unset).
# NOTE(review): not referenced elsewhere in this file — presumably consumed
# by the hosting platform or model download; confirm before removing.
HF_TOKEN = os.environ.get("HF_TOKEN", None)

# HTML banner rendered at the top of the app.
DESCRIPTION = '''
<div>
<h1 style="text-align: center;">AI Lawyer</h1>
</div>
'''

# Footer markdown identifying the underlying model.
LICENSE = """
<p/>
---
Built with model "StevenChen16/llama3-8B-Lawyer", based on "meta-llama/Meta-Llama-3-8B"
"""

# Welcome card shown inside the chatbot component while it is empty.
PLACEHOLDER = """
<div style="padding: 30px; text-align: center; display: flex; flex-direction: column; align-items: center;">
   <h1 style="font-size: 28px; margin-bottom: 2px; opacity: 0.55;">AI Lawyer</h1>
   <p style="font-size: 18px; margin-bottom: 2px; opacity: 0.65;">Ask me anything about US and Canada law...</p>
</div>
"""

# Custom CSS: centered page title, styled duplicate button, and the
# avatar/bubble layout emitted by format_message (.chat-message, .me).
css = """
h1 {
  text-align: center;
  display: block;
}
#duplicate-button {
  margin: auto;
  color: white;
  background: #1565c0;
  border-radius: 100vh;
}
.chat-message {
  display: flex;
  align-items: flex-start;
  margin-bottom: 10px;
}
.chat-message img {
  width: 40px;
  height: 40px;
  margin-right: 10px;
  border-radius: 50%;
}
.chat-message .message {
  max-width: 80%;
  background-color: #f1f1f1;
  padding: 10px;
  border-radius: 10px;
}
.me .message {
  background-color: #d1e7ff;
}
"""

# Inference configuration for llamafactory's ChatModel.
# NOTE(review): assumes ./model holds a LoRA adapter for a llama3-family
# base, loaded with 8-bit quantization and unsloth acceleration — confirm
# this matches the fine-tuning setup that produced the checkpoint.
args = dict(
    model_name_or_path="./model",
    template="llama3",
    finetuning_type="lora",
    quantization_bit=8,
    use_unsloth=True,
)
# Loaded once at import time and shared by all requests.
chat_model = ChatModel(args)

# System-style prefix prepended to every user query in query_model().
# Runtime string — its content is part of the model input; do not edit
# for formatting alone.
background_prompt = """
As an AI legal assistant, you are a highly trained expert in U.S. and Canadian law. Your purpose is to provide accurate, comprehensive, and professional legal information to assist users with a wide range of legal questions and issues. 

When responding to queries, adhere to the following guidelines:

1. Clarity and Precision: 
- Provide clear, concise answers using precise legal terminology.
- Explain complex legal concepts in a manner accessible to non-legal professionals.

2. Comprehensive Coverage:
- Offer thorough, well-rounded responses that address all relevant aspects of the question.
- Explain pertinent legal principles, statutes, case law, and their implications.

3. Contextual Relevance: 
- Tailor your advice to the specific context of each question.
- Utilize examples or analogies to illustrate legal concepts when appropriate.

4. Statutory and Case Law References:
- When citing statutes, explain their relevance and application to the matter at hand.
- When referencing case law, summarize the key facts, legal issues, court decisions, and the broader implications of the ruling.

5. Professional Tone:
- Maintain a professional, respectful demeanor in all interactions.
- Ensure your advice is legally sound and adheres to the highest ethical standards.

Remember, your role is to provide general legal information and analysis. 

This is a detailed description of the case or general questions, or detailed instructions for you:
"""

def query_model(user_input, history):
    """Stream the model's reply to *user_input*, yielding cumulative text.

    The module-level ``background_prompt`` is prepended to the user's text
    before it is sent to the chat model, and each yielded value is the full
    response accumulated so far.  *history* is accepted for interface
    compatibility but is not used here.
    """
    prompt = "".join((background_prompt, user_input))
    chat_messages = [{"role": "user", "content": prompt}]

    accumulated = ""
    stream = chat_model.stream_chat(chat_messages, max_new_tokens=512, temperature=0.9)
    for chunk in stream:
        accumulated += chunk
        yield accumulated

# Render one chat message as an HTML snippet (avatar image + bubble).
def format_message(role, content):
    """Wrap *content* in the chat-bubble HTML for the given *role*.

    ``'user'`` messages get an inline "Me" SVG avatar and the ``me``
    styling class; any other role gets the assistant avatar image.
    """
    bot_avatar = '<div class="chat-message"><img src="avatar.png" />'
    user_avatar = (
        '<div class="chat-message me"><img src="data:image/svg+xml,'
        "<svg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 100 100'>"
        "<rect width='100' height='100' fill='black'/>"
        "<text x='50%' y='50%' fill='white' font-size='50' text-anchor='middle' "
        "alignment-baseline='central'>Me</text></svg>\" />"
    )
    prefix = user_avatar if role == 'user' else bot_avatar
    return f'{prefix}<div class="message">{content}</div></div>'

# Gradio UI: chat window, input box, and send button wired to the model.
with gr.Blocks(css=css) as demo:
    gr.Markdown(DESCRIPTION)
    gr.DuplicateButton(value="Duplicate Space for private use", elem_id="duplicate-button")

    def respond(message, history):
        """Handle one chat turn.

        Renders both sides as HTML via format_message and appends exactly
        one (user_html, assistant_html) pair to *history*.  Returns the
        updated history (for the chatbot) and an empty string (to clear
        the input box).
        """
        formatted_user_message = format_message('user', message)

        # Drain the streaming generator to keep the FINAL cumulative
        # response; a single next() would keep only the first chunk.
        final_response = ""
        for partial in query_model(message, history):
            final_response = partial
        formatted_ai_response = format_message('ai', final_response)

        # One history row per turn: (user side, assistant side).
        history.append((formatted_user_message, formatted_ai_response))
        return history, ""

    # Single chatbot component, created inside the Blocks context so the
    # PLACEHOLDER welcome card is actually shown (previously a
    # placeholder-equipped Chatbot was built at module level and then
    # shadowed by a plain one here).
    chatbot = gr.Chatbot(height=450, placeholder=PLACEHOLDER, label='Gradio ChatInterface')
    input_text = gr.Textbox(label="Input", placeholder="Type your question here...")
    send_button = gr.Button("Send")

    # Update the chat window and clear the textbox after each send.
    send_button.click(respond, [input_text, chatbot], [chatbot, input_text])

    gr.Markdown(LICENSE)

# Script entry point: launch the Gradio app with a public share link.
if __name__ == "__main__":
    demo.launch(share=True)