richardr1126 committed
Commit 4e89a65 · 1 Parent(s): acacece

Switch to A10g

Files changed (2)
  1. README.md +1 -1
  2. app.py +6 -6
README.md CHANGED
@@ -5,7 +5,7 @@ colorFrom: gray
 colorTo: purple
 sdk: gradio
 sdk_version: 3.37.0
-app_file: app-ngrok.py
+app_file: app.py
 pinned: true
 license: bigcode-openrail-m
 tags:
app.py CHANGED
@@ -48,7 +48,7 @@ firebase_admin.initialize_app(cred)
 db = firestore.client()
 
 def log_message_to_firestore(input_message, db_info, temperature, response_text):
-    doc_ref = db.collection('codellama-logs').document()
+    doc_ref = db.collection('logs').document()
     log_data = {
         'timestamp': firestore.SERVER_TIMESTAMP,
         'temperature': temperature,
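For reference, a minimal sketch of how the renamed 'logs' collection gets written, assuming the firebase_admin setup shown in the context lines above; the credential path, the doc_ref.set() call, and any fields beyond 'timestamp' and 'temperature' are illustrative rather than taken from app.py:

import firebase_admin
from firebase_admin import credentials, firestore

# Initialize the Firestore client as app.py does (placeholder credential path).
cred = credentials.Certificate("serviceAccountKey.json")
firebase_admin.initialize_app(cred)
db = firestore.client()

def log_message_to_firestore(input_message, db_info, temperature, response_text):
    # Auto-generated document ID in the renamed 'logs' collection.
    doc_ref = db.collection('logs').document()
    log_data = {
        'timestamp': firestore.SERVER_TIMESTAMP,
        'temperature': temperature,
        # Assumed fields; the diff only shows the two keys above.
        'db_info': db_info,
        'input_message': input_message,
        'response_text': response_text,
    }
    doc_ref.set(log_data)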
@@ -73,7 +73,7 @@ def log_rating_to_firestore(input_message, db_info, temperature, response_text,
 
     rated_outputs.add(output_id)
 
-    doc_ref = db.collection('codellama-ratings').document()
+    doc_ref = db.collection('ratings').document()
     log_data = {
         'timestamp': firestore.SERVER_TIMESTAMP,
         'temperature': temperature,
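The rating logger follows the same write pattern but first de-duplicates by output ID via the module-level rated_outputs set seen in the context lines. A rough sketch of that guard, reusing the db client from the sketch above; the output_id construction and the 'rating' field are assumed, not taken from the diff:

rated_outputs = set()  # IDs of outputs that have already received a rating

def log_rating_to_firestore(input_message, db_info, temperature, response_text, rating):
    # Derive a stable ID for this generation (construction assumed for illustration).
    output_id = f"{input_message}_{temperature}"
    if output_id in rated_outputs:
        return  # this output was already rated; skip the duplicate write
    rated_outputs.add(output_id)

    doc_ref = db.collection('ratings').document()
    doc_ref.set({
        'timestamp': firestore.SERVER_TIMESTAMP,
        'temperature': temperature,
        'rating': rating,  # assumed field name
    })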
@@ -118,8 +118,8 @@ m = AutoModelForCausalLM.from_pretrained(
     #load_in_8bit=True,
 )
 
-# m.config.pad_token_id = m.config.eos_token_id
-# m.generation_config.pad_token_id = m.config.eos_token_id
+m.config.pad_token_id = m.config.eos_token_id
+m.generation_config.pad_token_id = m.config.eos_token_id
 
 print(f"Successfully loaded the model {model_name} into memory")
 
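The newly uncommented lines matter because many code-generation checkpoints ship a tokenizer with no dedicated padding token, so transformers prints a "Setting pad_token_id to eos_token_id" warning on every generate() call when pad_token_id is unset; reusing the EOS token is the usual workaround. A small sketch of the pattern under that assumption (the checkpoint name and sampling arguments are placeholders, not the ones in app.py):

from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "bigcode/starcoderbase-1b"  # placeholder checkpoint, not app.py's model_name

tok = AutoTokenizer.from_pretrained(model_name)
m = AutoModelForCausalLM.from_pretrained(model_name)

# Reuse EOS as the pad token so generate() has a valid pad_token_id,
# both on the model config and on the generation config it reads at runtime.
m.config.pad_token_id = m.config.eos_token_id
m.generation_config.pad_token_id = m.config.eos_token_id

inputs = tok("SELECT", return_tensors="pt")
out = m.generate(**inputs, max_new_tokens=32, do_sample=True, temperature=0.2, top_p=0.9)
print(tok.decode(out[0], skip_special_tokens=True))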
@@ -179,8 +179,8 @@ def generate(input_message: str, db_info="", temperature=0.2, top_p=0.9, top_k=0
 with gr.Blocks(theme='gradio/soft') as demo:
     # Elements stack vertically by default just define elements in order you want them to stack
     header = gr.HTML("""
-        <h1 style="text-align: center">SQL CodeLlama Demo</h1>
-        <h3 style="text-align: center">🕷️☠️🦙 Generate SQL queries from Natural Language 🕷️☠️🧙🦙</h3>
+        <h1 style="text-align: center">SQL Skeleton WizardCoder Demo</h1>
+        <h3 style="text-align: center">🕷️☠️🧙‍♂️ Generate SQL queries from Natural Language 🕷️☠️🧙‍♂️</h3>
         <div style="max-width: 450px; margin: auto; text-align: center">
             <p style="font-size: 12px; text-align: center">⚠️ Should take 30-60s to generate. Please rate the response, it helps a lot. If you get a blank output, the model server is currently down, please try again another time.</p>
         </div>
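The retitled header is the first element inside a gr.Blocks layout, where components stack vertically in definition order. A self-contained sketch of that structure, with the input/output components and the stub generate() assumed, since the diff only shows the header HTML:

import gradio as gr

def generate(input_message, db_info="", temperature=0.2):
    # Stand-in for app.py's real generate(); returns a canned query for illustration.
    return "SELECT count(*) FROM singer;"

with gr.Blocks(theme='gradio/soft') as demo:
    # Elements stack vertically in the order they are defined.
    header = gr.HTML("""
        <h1 style="text-align: center">SQL Skeleton WizardCoder Demo</h1>
        <h3 style="text-align: center">Generate SQL queries from Natural Language</h3>
    """)
    question = gr.Textbox(label="Natural language question")
    sql_out = gr.Textbox(label="Generated SQL")
    run_btn = gr.Button("Generate")
    run_btn.click(fn=generate, inputs=question, outputs=sql_out)

demo.launch()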
 