m-ric HF staff committed on
Commit
099bf82
·
1 Parent(s): 9324245

Various improvements

Browse files
Files changed (1) hide show
  1. app.py +14 -12
app.py CHANGED
@@ -20,8 +20,7 @@ except Exception as e:
20
 
21
  TOKEN_COSTS = pd.DataFrame.from_dict(TOKEN_COSTS, orient='index').reset_index()
22
  TOKEN_COSTS.columns = ['model'] + list(TOKEN_COSTS.columns[1:])
23
- print(TOKEN_COSTS)
24
- print(TOKEN_COSTS.columns)
25
 
26
 
27
  def count_string_tokens(string: str, model: str) -> int:
@@ -39,11 +38,14 @@ def calculate_total_cost(prompt_tokens: int, completion_tokens: int, model: str)
39
  return prompt_cost, completion_cost
40
 
41
def update_model_list(function_calling, litellm_provider, max_price):
    """Rebuild the model Dropdown restricted to the selected LiteLLM provider.

    Filters the module-level TOKEN_COSTS DataFrame on its
    'litellm_provider' column and returns a gr.Dropdown whose choices
    are the matching model names, pre-selecting the first one.
    NOTE(review): `function_calling` and `max_price` are accepted but
    not used for filtering here.
    """
    provider_mask = TOKEN_COSTS['litellm_provider'] == litellm_provider
    matching_rows = TOKEN_COSTS[provider_mask]
    model_names = matching_rows['model'].tolist()
    return gr.Dropdown(choices=model_names, value=model_names[0])
 
 
 
47
 
48
  def compute_all(input_type, prompt_text, completion_text, prompt_tokens, completion_tokens, model):
49
  if input_type == "Text Input":
@@ -62,7 +64,7 @@ def compute_all(input_type, prompt_text, completion_text, prompt_tokens, complet
62
  f"${total_cost:.6f}"
63
  )
64
 
65
- with gr.Blocks(theme=gr.themes.Soft()) as demo:
66
  gr.Markdown("""
67
  # Text-to-$$$: Calculate the price of your LLM runs
68
  Based on data from [litellm](https://github.com/BerriAI/litellm/blob/main/model_prices_and_context_window.json).
@@ -80,17 +82,17 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
80
  with gr.Group(visible=False) as token_input_group:
81
  prompt_tokens_input = gr.Number(label="Prompt Tokens (thousands)", value=1.5)
82
  completion_tokens_input = gr.Number(label="Completion Tokens (thousands)", value=2)
83
-
84
  gr.Markdown("## Model choice:")
85
  with gr.Row():
86
  function_calling = gr.Checkbox(label="Supports Function Calling", value=False)
87
- litellm_provider = gr.Dropdown(label="LiteLLM Provider", choices=TOKEN_COSTS['litellm_provider'].unique().tolist())
88
 
89
  max_price = gr.Slider(label="Max Price per Token (input + output)", minimum=0, maximum=0.001, step=0.00001, value=0.001)
90
 
91
- model = gr.Dropdown(label="Model", choices=TOKEN_COSTS['model'].tolist())
92
 
93
- compute_button = gr.Button("Compute Costs", variant="primary")
94
 
95
  with gr.Column(scale=1):
96
  with gr.Group():
 
20
 
21
# Flatten the per-model cost mapping into a DataFrame: one row per model,
# with the dict keys surfaced as a 'model' column.
_costs_frame = pd.DataFrame.from_dict(TOKEN_COSTS, orient='index').reset_index()
_costs_frame.columns = ['model'] + list(_costs_frame.columns[1:])
# Drop the documentation-only "sample_spec" placeholder entry from the data.
TOKEN_COSTS = _costs_frame[~_costs_frame['model'].str.contains("sample_spec")]
 
24
 
25
 
26
  def count_string_tokens(string: str, model: str) -> int:
 
38
  return prompt_cost, completion_cost
39
 
40
def update_model_list(function_calling, litellm_provider, max_price):
    """Rebuild the model Dropdown for the selected LiteLLM provider.

    Parameters:
        function_calling: checkbox state — accepted but currently unused
            for filtering (TODO: apply it).
        litellm_provider: provider name, or "Any" for no provider filter.
        max_price: slider value — accepted but currently unused for
            filtering (TODO: apply it).

    Returns:
        gr.Dropdown with the matching model names as choices and the
        first match pre-selected.
    """
    # BUG FIX: the original "Any" branch returned value=list_models[0]
    # before list_models was ever assigned, raising NameError. Compute
    # the model list first in both branches, then build the Dropdown once.
    if litellm_provider == "Any":
        list_models = TOKEN_COSTS['model'].tolist()
    else:
        filtered_models = TOKEN_COSTS[
            (TOKEN_COSTS['litellm_provider'] == litellm_provider)
        ]
        list_models = filtered_models['model'].tolist()
    # Guard the empty-result case so an unknown provider doesn't IndexError.
    return gr.Dropdown(choices=list_models,
                       value=list_models[0] if list_models else None)
49
 
50
  def compute_all(input_type, prompt_text, completion_text, prompt_tokens, completion_tokens, model):
51
  if input_type == "Text Input":
 
64
  f"${total_cost:.6f}"
65
  )
66
 
67
+ with gr.Blocks(theme=gr.themes.Soft(primary_hue=gr.themes.colors.yellow, secondary_hue=gr.themes.colors.orange)) as demo:
68
  gr.Markdown("""
69
  # Text-to-$$$: Calculate the price of your LLM runs
70
  Based on data from [litellm](https://github.com/BerriAI/litellm/blob/main/model_prices_and_context_window.json).
 
82
  with gr.Group(visible=False) as token_input_group:
83
  prompt_tokens_input = gr.Number(label="Prompt Tokens (thousands)", value=1.5)
84
  completion_tokens_input = gr.Number(label="Completion Tokens (thousands)", value=2)
85
+
86
  gr.Markdown("## Model choice:")
87
  with gr.Row():
88
  function_calling = gr.Checkbox(label="Supports Function Calling", value=False)
89
+ litellm_provider = gr.Dropdown(label="LiteLLM Provider", choices=["Any"] + TOKEN_COSTS['litellm_provider'].unique().tolist(), value="Any")
90
 
91
  max_price = gr.Slider(label="Max Price per Token (input + output)", minimum=0, maximum=0.001, step=0.00001, value=0.001)
92
 
93
+ model = gr.Dropdown(label="Model", choices=TOKEN_COSTS['model'].tolist(), value=TOKEN_COSTS['model'].tolist()[0])
94
 
95
+ compute_button = gr.Button("Compute Costs", variant="secondary")
96
 
97
  with gr.Column(scale=1):
98
  with gr.Group():