update: filter models + clean files
Browse files- __init__.py +0 -8
- calculator.py +0 -67
- constants.py +0 -106
- content.py +0 -232
- electricity_mix.py +0 -175
- expert.py +0 -156
- impacts.py +0 -116
- models.py +0 -46
- src/__pycache__/__init__.cpython-312.pyc +0 -0
- src/__pycache__/calculator.cpython-312.pyc +0 -0
- src/__pycache__/constants.cpython-312.pyc +0 -0
- src/__pycache__/content.cpython-312.pyc +0 -0
- src/__pycache__/electricity_mix.cpython-312.pyc +0 -0
- src/__pycache__/expert.cpython-312.pyc +0 -0
- src/__pycache__/impacts.cpython-312.pyc +0 -0
- src/__pycache__/models.cpython-312.pyc +0 -0
- src/__pycache__/utils.cpython-312.pyc +0 -0
- src/calculator.py +15 -8
- src/constants.py +85 -89
- src/models.py +12 -5
- utils.py +0 -262
- uv.lock +0 -1
__init__.py
DELETED
@@ -1,8 +0,0 @@
|
|
1 |
-
from .content import *
|
2 |
-
from .constants import *
|
3 |
-
from .expert import expert_mode
|
4 |
-
from .utils import *
|
5 |
-
from .calculator import calculator_mode
|
6 |
-
from .impacts import get_impacts, display_impacts
|
7 |
-
from .models import load_models
|
8 |
-
from .electricity_mix import *
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
calculator.py
DELETED
@@ -1,67 +0,0 @@
|
|
1 |
-
import streamlit as st
|
2 |
-
import pandas as pd
|
3 |
-
|
4 |
-
from ecologits.tracers.utils import llm_impacts
|
5 |
-
from src.impacts import get_impacts, display_impacts, display_equivalent
|
6 |
-
from src.utils import format_impacts
|
7 |
-
from src.content import WARNING_CLOSED_SOURCE, WARNING_MULTI_MODAL, WARNING_BOTH
|
8 |
-
from src.models import load_models, clean_models_data
|
9 |
-
|
10 |
-
from src.constants import PROMPTS
|
11 |
-
|
12 |
-
def calculator_mode():
|
13 |
-
|
14 |
-
with st.container(border=True):
|
15 |
-
|
16 |
-
df = load_models()
|
17 |
-
|
18 |
-
col1, col2, col3 = st.columns(3)
|
19 |
-
|
20 |
-
with col1:
|
21 |
-
provider = st.selectbox(label = 'Provider',
|
22 |
-
options = [x for x in df['provider_clean'].unique()],
|
23 |
-
index = 9)
|
24 |
-
provider_raw = df[df['provider_clean'] == provider]['provider'].values[0]
|
25 |
-
|
26 |
-
with col2:
|
27 |
-
model = st.selectbox('Model', [x for x in df['name_clean'].unique() if x in df[df['provider_clean'] == provider]['name_clean'].unique()])
|
28 |
-
model_raw = df[(df['provider_clean'] == provider) & (df['name_clean'] == model)]['name'].values[0]
|
29 |
-
|
30 |
-
with col3:
|
31 |
-
output_tokens = st.selectbox('Example prompt', [x[0] for x in PROMPTS])
|
32 |
-
|
33 |
-
# WARNING DISPLAY
|
34 |
-
|
35 |
-
df_filtered = df[(df['provider_clean'] == provider) & (df['name_clean'] == model)]
|
36 |
-
|
37 |
-
if df_filtered['warning_arch'].values[0] and not df_filtered['warning_multi_modal'].values[0]:
|
38 |
-
st.warning(WARNING_CLOSED_SOURCE)
|
39 |
-
if df_filtered['warning_multi_modal'].values[0] and not df_filtered['warning_arch'].values[0]:
|
40 |
-
st.warning(WARNING_MULTI_MODAL)
|
41 |
-
if df_filtered['warning_arch'].values[0] and df_filtered['warning_multi_modal'].values[0]:
|
42 |
-
st.warning(WARNING_BOTH)
|
43 |
-
|
44 |
-
try:
|
45 |
-
impacts = llm_impacts(
|
46 |
-
provider=provider_raw,
|
47 |
-
model_name=model_raw,
|
48 |
-
output_token_count=[x[1] for x in PROMPTS if x[0] == output_tokens][0],
|
49 |
-
request_latency=100000
|
50 |
-
)
|
51 |
-
|
52 |
-
impacts, _, _ = format_impacts(impacts)
|
53 |
-
|
54 |
-
with st.container(border=True):
|
55 |
-
|
56 |
-
st.markdown('<h3 align = "center">Environmental impacts</h3>', unsafe_allow_html=True)
|
57 |
-
st.markdown('<p align = "center">To understand how the environmental impacts are computed go to the 📖 Methodology tab.</p>', unsafe_allow_html=True)
|
58 |
-
display_impacts(impacts)
|
59 |
-
|
60 |
-
with st.container(border=True):
|
61 |
-
|
62 |
-
st.markdown('<h3 align = "center">That\'s equivalent to ...</h3>', unsafe_allow_html=True)
|
63 |
-
st.markdown('<p align = "center">Making this request to the LLM is equivalent to the following actions :</p>', unsafe_allow_html=True)
|
64 |
-
display_equivalent(impacts)
|
65 |
-
|
66 |
-
except Exception as e:
|
67 |
-
st.error('Could not find the model in the repository. Please try another model.')
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
constants.py
DELETED
@@ -1,106 +0,0 @@
|
|
1 |
-
PROVIDERS = [
|
2 |
-
("OpenAI", "openai"),
|
3 |
-
("Anthropic", "anthropic"),
|
4 |
-
("Cohere", "cohere"),
|
5 |
-
("Meta", "huggingface_hub/meta"),
|
6 |
-
("Mistral AI", "mistralai"),
|
7 |
-
]
|
8 |
-
|
9 |
-
OPENAI_MODELS = [
|
10 |
-
("GPT-4o", "gpt-4o"),
|
11 |
-
("GPT-4-Turbo", "gpt-4-turbo"),
|
12 |
-
("GPT-4", "gpt-4"),
|
13 |
-
("GPT-3.5-Turbo", "gpt-3.5-turbo"),
|
14 |
-
]
|
15 |
-
|
16 |
-
ANTHROPIC_MODELS = [
|
17 |
-
("Claude 3 Opus", "claude-3-opus-20240229"),
|
18 |
-
("Claude 3 Sonnet", "claude-3-sonnet-20240229"),
|
19 |
-
("Claude 3 Haiku", "claude-3-haiku-20240307"),
|
20 |
-
("Claude 2.1", "claude-2.1"),
|
21 |
-
("Claude 2.0", "claude-2.0"),
|
22 |
-
("Claude Instant 1.2", "claude-instant-1.2"),
|
23 |
-
]
|
24 |
-
|
25 |
-
COHERE_MODELS = [
|
26 |
-
("Command Light", "command-light"),
|
27 |
-
("Command", "command"),
|
28 |
-
("Command R", "command-r"),
|
29 |
-
("Command R+", "command-r-plus"),
|
30 |
-
]
|
31 |
-
|
32 |
-
META_MODELS = [
|
33 |
-
("Llama 3 8B", "meta-llama/Meta-Llama-3-8B"),
|
34 |
-
("Llama 3 70B", "meta-llama/Meta-Llama-3-70B"),
|
35 |
-
("Llama 2 7B", "meta-llama/Llama-2-7b-hf"),
|
36 |
-
("Llama 2 13B", "meta-llama/Llama-2-13b-hf"),
|
37 |
-
("Llama 2 70B", "meta-llama/Llama-2-70b-hf"),
|
38 |
-
]
|
39 |
-
|
40 |
-
MISTRALAI_MODELS = [
|
41 |
-
("Mistral 7B", "open-mistral-7b"),
|
42 |
-
("Mixtral 8x7B", "open-mixtral-8x7b"),
|
43 |
-
("Mixtral 8x22B", "open-mixtral-8x22b"),
|
44 |
-
("Tiny", "mistral-tiny-2312"),
|
45 |
-
("Small", "mistral-small-2402"),
|
46 |
-
("Medium", "mistral-medium-2312"),
|
47 |
-
("Large", "mistral-large-2402"),
|
48 |
-
]
|
49 |
-
|
50 |
-
PROMPTS = [
|
51 |
-
("Write a Tweet", 50),
|
52 |
-
("Write an email", 170),
|
53 |
-
("Write an article summary", 250),
|
54 |
-
("Small conversation with a chatbot", 400),
|
55 |
-
("Write a report of 5 pages", 5000),
|
56 |
-
("Write the code for this app", 10000)
|
57 |
-
]
|
58 |
-
PROMPTS = [(s + f" ({v} output tokens)", v) for (s, v) in PROMPTS]
|
59 |
-
|
60 |
-
CLOSED_SOURCE_MODELS = {
|
61 |
-
"openai/gpt-4o",
|
62 |
-
"openai/gpt-4-turbo",
|
63 |
-
"openai/gpt-4",
|
64 |
-
"openai/gpt-3.5-turbo",
|
65 |
-
"anthropic/claude-3-opus-20240229",
|
66 |
-
"anthropic/claude-3-sonnet-20240229",
|
67 |
-
"anthropic/claude-3-haiku-20240307",
|
68 |
-
"anthropic/claude-2.1",
|
69 |
-
"anthropic/claude-2.0",
|
70 |
-
"anthropic/claude-instant-1.2",
|
71 |
-
"mistralai/mistral-tiny-2312",
|
72 |
-
"mistralai/mistral-small-2402",
|
73 |
-
"mistralai/mistral-medium-2312",
|
74 |
-
"mistralai/mistral-large-2402",
|
75 |
-
}
|
76 |
-
|
77 |
-
MODELS = [
|
78 |
-
("OpenAI / GPT-4o", "openai/gpt-4o"),
|
79 |
-
("OpenAI / GPT-4-Turbo", "openai/gpt-4-turbo"),
|
80 |
-
("OpenAI / GPT-4", "openai/gpt-4"),
|
81 |
-
("OpenAI / GPT-3.5-Turbo", "openai/gpt-3.5-turbo"),
|
82 |
-
("Anthropic / Claude 3 Opus", "anthropic/claude-3-opus-20240229"),
|
83 |
-
("Anthropic / Claude 3 Sonnet", "anthropic/claude-3-sonnet-20240229"),
|
84 |
-
("Anthropic / Claude 3 Haiku", "anthropic/claude-3-haiku-20240307"),
|
85 |
-
("Anthropic / Claude 2.1", "anthropic/claude-2.1"),
|
86 |
-
("Anthropic / Claude 2.0", "anthropic/claude-2.0"),
|
87 |
-
("Anthropic / Claude Instant 1.2", "anthropic/claude-instant-1.2"),
|
88 |
-
("Mistral AI / Mistral 7B", "mistralai/open-mistral-7b"),
|
89 |
-
("Mistral AI / Mixtral 8x7B", "mistralai/open-mixtral-8x7b"),
|
90 |
-
("Mistral AI / Mixtral 8x22B", "mistralai/open-mixtral-8x22b"),
|
91 |
-
("Mistral AI / Tiny", "mistralai/mistral-tiny-2312"),
|
92 |
-
("Mistral AI / Small", "mistralai/mistral-small-2402"),
|
93 |
-
("Mistral AI / Medium", "mistralai/mistral-medium-2312"),
|
94 |
-
("Mistral AI / Large", "mistralai/mistral-large-2402"),
|
95 |
-
("Meta / Llama 3 8B", "huggingface_hub/meta-llama/Meta-Llama-3-8B"),
|
96 |
-
("Meta / Llama 3 70B", "huggingface_hub/meta-llama/Meta-Llama-3-70B"),
|
97 |
-
("Meta / Llama 2 7B", "huggingface_hub/meta-llama/Llama-2-7b-hf"),
|
98 |
-
("Meta / Llama 2 13B", "huggingface_hub/meta-llama/Llama-2-13b-hf"),
|
99 |
-
("Meta / Llama 2 70B", "huggingface_hub/meta-llama/Llama-2-70b-hf"),
|
100 |
-
("Cohere / Command Light", "cohere/command-light"),
|
101 |
-
("Cohere / Command", "cohere/command"),
|
102 |
-
("Cohere / Command R", "cohere/command-r"),
|
103 |
-
("Cohere / Command R+", "cohere/command-r-plus"),
|
104 |
-
]
|
105 |
-
|
106 |
-
MODEL_REPOSITORY_URL = "https://raw.githubusercontent.com/genai-impact/ecologits/refs/heads/main/ecologits/data/models.json"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
content.py
DELETED
@@ -1,232 +0,0 @@
|
|
1 |
-
HERO_TEXT = """
|
2 |
-
<div align="center">
|
3 |
-
<a href="https://ecologits.ai/">
|
4 |
-
<img style="max-height: 80px" alt="EcoLogits" src="https://raw.githubusercontent.com/genai-impact/ecologits/main/docs/assets/logo_light.png">
|
5 |
-
</a>
|
6 |
-
</div>
|
7 |
-
<h1 align="center">🧮 EcoLogits Calculator</h1>
|
8 |
-
<div align="center">
|
9 |
-
<p style="max-width: 500px; text-align: center">
|
10 |
-
<i><b>EcoLogits</b> is a python library that tracks the <b>energy consumption</b> and <b>environmental
|
11 |
-
footprint</b> of using <b>generative AI</b> models through APIs.</i>
|
12 |
-
</p>
|
13 |
-
</div>
|
14 |
-
<br>
|
15 |
-
"""
|
16 |
-
|
17 |
-
INTRO_TEXT = """
|
18 |
-
This tool is developed and maintained by [GenAI Impact](https://genai-impact.org/) non-profit. Learn more about
|
19 |
-
🌱 EcoLogits by reading the documentation on [ecologits.ai](https://ecologits.ai).
|
20 |
-
|
21 |
-
🩷 Support us by giving a ⭐️ on our [GitHub repository](https://github.com/genai-impact/ecologits) and by following our [LinkedIn page](https://www.linkedin.com/company/genai-impact/).
|
22 |
-
"""
|
23 |
-
|
24 |
-
WARNING_CLOSED_SOURCE = """
|
25 |
-
⚠️ The model architecture has not been publicly released, expect lower precision of estimations.
|
26 |
-
"""
|
27 |
-
|
28 |
-
WARNING_MULTI_MODAL = """
|
29 |
-
⚠️ The model architecture is multimodal, expect lower precision of estimations.
|
30 |
-
"""
|
31 |
-
|
32 |
-
WARNING_BOTH = """
|
33 |
-
⚠️ The model architecture has not been publicly released and is multimodal, expect lower precision of estimations.
|
34 |
-
"""
|
35 |
-
|
36 |
-
ABOUT_TEXT = r"""
|
37 |
-
## 🎯 Our goal
|
38 |
-
**The main goal of the EcoLogits Calculator is to raise awareness on the environmental impacts of LLM inference.**
|
39 |
-
The rapid evolution of generative AI is reshaping numerous industries and aspects of our daily lives. While these
|
40 |
-
advancements offer some benefits, they also **pose substantial environmental challenges that cannot be overlooked**.
|
41 |
-
Plus the issue of AI's environmental footprint has been mainly discussed at training stage but rarely at the inference
|
42 |
-
stage. That is an issue because **inference impacts for LLMs can largely overcome the training impacts when deployed
|
43 |
-
at large scales**.
|
44 |
-
At **[GenAI Impact](https://genai-impact.org/) we are dedicated to understanding and mitigating the environmental
|
45 |
-
impacts of generative AI** through rigorous research, innovative tools, and community engagement. Especially, in early
|
46 |
-
2024 we have launched an new open-source tool called [EcoLogits](https://github.com/genai-impact/ecologits) that tracks
|
47 |
-
the energy consumption and environmental footprint of using generative AI models through APIs.
|
48 |
-
## 🙋 FAQ
|
49 |
-
**How we assess the impacts of closed-source models?**
|
50 |
-
Environmental impacts are calculated based on model architecture and parameter count. For closed-source models, we
|
51 |
-
lack transparency from providers, so we estimate parameter counts using available information. For GPT models, we
|
52 |
-
based our estimates on leaked GPT-4 architecture and scaled parameters count for GPT-4-Turbo and GPT-4o based on
|
53 |
-
pricing differences. For other proprietary models like Anthropic's Claude, we assume similar impacts for models
|
54 |
-
released around the same time with similar performance on public benchmarks. Please note that these estimates are
|
55 |
-
based on assumptions and may not be exact. Our methods are open-source and transparent so you can always see the
|
56 |
-
hypotheses we use.
|
57 |
-
**Which generative AI models or providers are supported?**
|
58 |
-
To see the full list of **generative AI providers** currently supported by EcoLogits, see the following
|
59 |
-
[documentation page](https://ecologits.ai/providers/). As of today we only support LLMs but we plan to add support for
|
60 |
-
embeddings, image generation, multi-modal models and more. If you are interested don't hesitate to
|
61 |
-
[join us](https://genai-impact.org/contact/) and accelerate our work!
|
62 |
-
**How to reduce AI environmental impacts?**
|
63 |
-
* Look at **indirect impacts** of your project. Does the finality of your project is impacting negatively the
|
64 |
-
environment?
|
65 |
-
* **Be frugal** and question your usage or need of AI
|
66 |
-
* Do you really need AI to solve your problem?
|
67 |
-
* Do you really need GenAI to solve your problem? (you can read this [paper](https://aclanthology.org/2023.emnlp-industry.39.pdf))
|
68 |
-
* Use small and specialized models to solve your problem.
|
69 |
-
* Evaluate before, during and after the development of your project the environmental impacts with tools like
|
70 |
-
🌱 [EcoLogits](https://github.com/genai-impact/ecologits) or [CodeCarbon](https://github.com/mlco2/codecarbon)
|
71 |
-
(see [more tools](https://github.com/samuelrince/awesome-green-ai))
|
72 |
-
* Restrict the use case and limit the usage of your tool or feature to the desired purpose.
|
73 |
-
* Do NOT buy new GPUs / hardware
|
74 |
-
* Hardware manufacturing for data centers is around 50% of the impact.
|
75 |
-
* Use cloud instances that are located in low emissions / high energy efficiency data centers
|
76 |
-
(see [electricitymaps.com](https://app.electricitymaps.com/map))
|
77 |
-
* Optimize your models for production
|
78 |
-
* Quantize your models.
|
79 |
-
* Use inference optimization tricks.
|
80 |
-
* Prefer fine-tuning of small and existing models over generalist models.
|
81 |
-
**What is the difference between **EcoLogits** and [CodeCarbon](https://github.com/mlco2/codecarbon)?**
|
82 |
-
EcoLogits is focused on estimating the environmental impacts of generative AI (only LLMs for now) used **through API
|
83 |
-
providers (such as OpenAI, Anthropic, Cloud APIs...)** whereas CodeCarbon is more general tool to measure energy
|
84 |
-
consumption and estimate GHG emissions measurement. If you deploy LLMs locally we encourage you to use CodeCarbon to
|
85 |
-
get real numbers of your energy consumption.
|
86 |
-
## 🤗 Contributing
|
87 |
-
We are eager to get feedback from the community, don't hesitate to engage the discussion with us on this
|
88 |
-
[GitHub thread](https://github.com/genai-impact/ecologits/discussions/45) or message us on
|
89 |
-
[LinkedIn](https://www.linkedin.com/company/genai-impact/).
|
90 |
-
We also welcome any open-source contributions on 🌱 **[EcoLogits](https://github.com/genai-impact/ecologits)** or on
|
91 |
-
🧮 **EcoLogits Calculator**.
|
92 |
-
## ⚖️ License
|
93 |
-
<p xmlns:cc="http://creativecommons.org/ns#" >
|
94 |
-
This work is licensed under
|
95 |
-
<a href="https://creativecommons.org/licenses/by-sa/4.0/?ref=chooser-v1" target="_blank" rel="license noopener noreferrer" style="display:inline-block;">
|
96 |
-
CC BY-SA 4.0
|
97 |
-
</a>
|
98 |
-
<img style="display:inline-block;height:22px!important;margin-left:3px;vertical-align:text-bottom;" src="https://mirrors.creativecommons.org/presskit/icons/cc.svg?ref=chooser-v1" alt="">
|
99 |
-
<img style="display:inline-block;height:22px!important;margin-left:3px;vertical-align:text-bottom;" src="https://mirrors.creativecommons.org/presskit/icons/by.svg?ref=chooser-v1" alt="">
|
100 |
-
<img style="display:inline-block;height:22px!important;margin-left:3px;vertical-align:text-bottom;" src="https://mirrors.creativecommons.org/presskit/icons/sa.svg?ref=chooser-v1" alt="">
|
101 |
-
</p>
|
102 |
-
## 🙌 Acknowledgement
|
103 |
-
We thank [Data For Good](https://dataforgood.fr/) and [Boavizta](https://boavizta.org/en) for supporting the
|
104 |
-
development of this project. Their contributions of tools, best practices, and expertise in environmental impact
|
105 |
-
assessment have been invaluable.
|
106 |
-
We also extend our gratitude to the open-source contributions of 🤗 [Hugging Face](huggingface.com) on the LLM-Perf
|
107 |
-
Leaderboard.
|
108 |
-
## 🤝 Contact
|
109 |
-
For general question on the project, please use the [GitHub thread](https://github.com/genai-impact/ecologits/discussions/45).
|
110 |
-
Otherwise use our contact form on [genai-impact.org/contact](https://genai-impact.org/contact/).
|
111 |
-
"""
|
112 |
-
|
113 |
-
|
114 |
-
METHODOLOGY_TEXT = r"""
|
115 |
-
## 📖 Methodology
|
116 |
-
We have developed a methodology to **estimate the energy consumption and environmental impacts for an LLM inference**
|
117 |
-
based on request parameters and hypotheses on the data center location, the hardware used, the model architecture and
|
118 |
-
more.
|
119 |
-
In this section we will only cover the principles of the methodology related to the 🧮 **EcoLogits Calculator**. If
|
120 |
-
you wish to learn more on the environmental impacts modeling of an LLM request checkout the
|
121 |
-
🌱 [EcoLogits documentation page](https://ecologits.ai/methodology/).
|
122 |
-
### Modeling impacts of an LLM request
|
123 |
-
The environmental impacts of an LLM inference are split into the **usage impacts** $I_{request}^u$ to account for
|
124 |
-
electricity consumption and the **embodied impacts** $I_{request}^e$ that relates to resource extraction, hardware
|
125 |
-
manufacturing and transportation. In general terms it can be expressed as follow:
|
126 |
-
$$ I_{request} = I_{request}^u + I_{request}^e $$
|
127 |
-
$$ I_{request} = E_{request}*F_{em}+\frac{\Delta T}{\Delta L}*I_{server}^e $$
|
128 |
-
With,
|
129 |
-
* $E_{request}$ the estimated energy consumption of the server and its cooling system.
|
130 |
-
* $F_{em}$ the electricity mix that depends on the country and time.
|
131 |
-
* $\frac{\Delta T}{\Delta L}$ the hardware usage ratio i.e. the computation time over the lifetime of the hardware.
|
132 |
-
* $I_{server}^e$ the embodied impacts of the server.
|
133 |
-
Additionally, to ⚡️ **direct energy consumption** the environmental impacts are expressed in **three dimensions
|
134 |
-
(multi-criteria impacts)** that are:
|
135 |
-
* 🌍 **Global Warming Potential** (GWP): Potential impact on global warming in kgCO2eq (commonly known as GHG/carbon
|
136 |
-
emissions).
|
137 |
-
* 🪨 **Abiotic Depletion Potential for Elements** (ADPe): Impact on the depletion of non-living resources such as
|
138 |
-
minerals or metals in kgSbeq.
|
139 |
-
* ⛽️ **Primary Energy** (PE): Total energy consumed from primary sources in MJ.
|
140 |
-
### Principles, Data and Hypotheses
|
141 |
-
We use a **bottom-up methodology** to model impacts, meaning that we will estimate the impacts of low-level physical
|
142 |
-
components to then estimate the impacts at software level (in that case an LLM inference). We also rely on **Life
|
143 |
-
Cycle Approach (LCA) proxies and approach** to model both usage and embodied phases with multi-criteria impacts.
|
144 |
-
If you are interested in this approach we recommend you to read the following [Boavizta](https://boavizta.org/)
|
145 |
-
resources.
|
146 |
-
* [Digital & environment: How to evaluate server manufacturing footprint, beyond greenhouse gas emissions?](https://boavizta.org/en/blog/empreinte-de-la-fabrication-d-un-serveur)
|
147 |
-
* [Boavizta API automated evaluation of environmental impacts of ICT services and equipments](https://boavizta.org/en/blog/boavizta-api-automated-evaluation-of-ict-impacts-on-the-environment)
|
148 |
-
* [Boavizta API documentation](https://doc.api.boavizta.org/)
|
149 |
-
We leverage **open data to estimate the environmental impacts**, here is an exhaustive list of our data providers.
|
150 |
-
* [LLM-Perf Leaderboard](https://huggingface.co/spaces/optimum/llm-perf-leaderboard) to estimate GPU energy consumption
|
151 |
-
and latency based on the model architecture and number of output tokens.
|
152 |
-
* [Boavizta API](https://github.com/Boavizta/boaviztapi) to estimate server embodied impacts and base energy
|
153 |
-
consumption.
|
154 |
-
* [ADEME Base Empreinte®](https://base-empreinte.ademe.fr/) for electricity mix impacts per country.
|
155 |
-
Finally here are the **main hypotheses** we have made to compute the impacts.
|
156 |
-
* ⚠️ **We *"guesstimate"* the model architecture of proprietary LLMs when not disclosed by the provider.**
|
157 |
-
* Production setup: quantized models running on data center grade servers and GPUs such as A100.
|
158 |
-
* Electricity mix does not depend on time (help us enhance EcoLogits and work on this [issue](https://github.com/genai-impact/ecologits/issues/42))
|
159 |
-
* Ignore the following impacts: unused cloud resources, data center building, network and end-user devices... (for now)
|
160 |
-
## Equivalents
|
161 |
-
We have integrated impact equivalents to help people better understand the impacts and have reference points for
|
162 |
-
standard use cases and everyday activities.
|
163 |
-
### Request impacts
|
164 |
-
These equivalents are computed based on the request impacts only.
|
165 |
-
#### 🚶♂️➡️ Walking or 🏃♂️➡️ running distance
|
166 |
-
We compare the ⚡️ direct energy consumption with the energy consumption of someone 🚶♂️➡️ walking or 🏃♂️➡️ running.
|
167 |
-
From [runningtools.com](https://www.runningtools.com/energyusage.htm) we consider the following energy values per
|
168 |
-
physical activity (for someone weighing 70kg):
|
169 |
-
* 🚶♂️➡️ walking: $ 196\ kJ/km $ (speed of $ 3\ km/h $)
|
170 |
-
* 🏃♂️➡️ running: $ 294\ kJ/km $ (speed of $ 10\ km/h $)
|
171 |
-
We divide the request energy consumption by these values to compute the distance traveled.
|
172 |
-
#### 🔋 Electric Vehicle distance
|
173 |
-
We compare the ⚡️ direct energy consumption with the energy consumer by a EV car. From
|
174 |
-
[selectra.info](https://selectra.info/energie/actualites/insolite/consommation-vehicules-electriques-france-2040) or
|
175 |
-
[tesla.com](https://www.tesla.com/fr_fr/support/power-consumption) we consider an average value of energy consumed per
|
176 |
-
kilometer of: $ 0.17\ kWh/km $.
|
177 |
-
We divide the request energy consumption by this value to compute the distance driven by an EV.
|
178 |
-
#### ⏯️ Streaming time
|
179 |
-
We compare the 🌍 GHG emissions of the request and of streaming a video. From
|
180 |
-
[impactco2.fr](https://impactco2.fr/outils/comparateur?value=1&comparisons=streamingvideo), we consider that
|
181 |
-
$ 1\ kgCO2eq $ is equivalent to $ 15.6\ h $ of streaming.
|
182 |
-
We multiply that value by the GHG emissions of the request to get an equivalent in hours of video streaming.
|
183 |
-
### Scaled impacts
|
184 |
-
These equivalents are computed based on the request impacts scaled to a worldwide adoption use case. We imply that the
|
185 |
-
same request is done 1% of the planet everyday for 1 year, and then compute impact equivalents.
|
186 |
-
$$
|
187 |
-
I_{scaled} = I_{request} * [1 \\% \ \text{of}\ 8B\ \text{people on earth}] * 365\ \text{days}
|
188 |
-
$$
|
189 |
-
#### Number of 💨 wind turbines or ☢️ nuclear plants
|
190 |
-
We compare the ⚡️ direct energy consumption (scaled) by the energy production of wind turbines and nuclear power
|
191 |
-
plants. From [ecologie.gouv.fr](https://www.ecologie.gouv.fr/eolien-terrestre) we consider that a $ 2\ MW $ wind
|
192 |
-
turbine produces $ 4.2\ GWh $ a year. And from [edf.fr](https://www.edf.fr/groupe-edf/espaces-dedies/jeunes-enseignants/pour-les-jeunes/lenergie-de-a-a-z/produire-de-lelectricite/le-nucleaire-en-chiffres)
|
193 |
-
we learn that a $ 900\ MW $ nuclear power plant produces $ 6\ TWh $ a year.
|
194 |
-
We divide the scaled energy consumption by these values to get the number of wind turbines or nuclear power plants
|
195 |
-
needed.
|
196 |
-
#### Multiplier of 🇮🇪 Ireland electricity consumption
|
197 |
-
We compare the ⚡️ direct energy consumption (scaled) by the electricity consumption of Ireland per year. From
|
198 |
-
[wikipedia.org](https://en.wikipedia.org/wiki/List_of_countries_by_electricity_consumption) we consider the Ireland
|
199 |
-
electricity consumption to be $ 33\ TWh $ a year for a population of 5M.
|
200 |
-
We divide the scaled energy consumption by this value to get the equivalent number of "Ireland countries".
|
201 |
-
#### Number of ✈️ Paris ↔ New York City flights
|
202 |
-
We compare the 🌍 GHG emissions (scaled) of the request and of a return flight Paris ↔ New York City. From
|
203 |
-
[impactco2.fr](https://impactco2.fr/outils/comparateur?value=1&comparisons=&equivalent=avion-pny) we consider that a
|
204 |
-
return flight Paris → New York City → Paris for one passenger emits $ 1,770\ kgCO2eq $ and we consider an overall
|
205 |
-
average load of 100 passengers per flight.
|
206 |
-
We divide the scaled GHG emissions by this value to get the equivalent number of return flights.
|
207 |
-
|
208 |
-
#### If you are motivated to help us test and enhance this methodology [contact us](https://genai-impact.org/contact/)! 💪
|
209 |
-
"""
|
210 |
-
|
211 |
-
CITATION_LABEL = "BibTeX citation for EcoLogits Calculator and the EcoLogits library:"
|
212 |
-
CITATION_TEXT = r"""@misc{ecologits-calculator,
|
213 |
-
author={Samuel Rincé, Adrien Banse and Valentin Defour},
|
214 |
-
title={EcoLogits Calculator},
|
215 |
-
year={2025},
|
216 |
-
howpublished= {\url{https://huggingface.co/spaces/genai-impact/ecologits-calculator}},
|
217 |
-
}
|
218 |
-
@software{ecologits,
|
219 |
-
author = {Samuel Rincé, Adrien Banse, Vinh Nguyen and Luc Berton},
|
220 |
-
publisher = {GenAI Impact},
|
221 |
-
title = {EcoLogits: track the energy consumption and environmental footprint of using generative AI models through APIs.},
|
222 |
-
}"""
|
223 |
-
|
224 |
-
LICENCE_TEXT = """<p xmlns:cc="http://creativecommons.org/ns#" >
|
225 |
-
This work is licensed under
|
226 |
-
<a href="https://creativecommons.org/licenses/by-sa/4.0/?ref=chooser-v1" target="_blank" rel="license noopener noreferrer" style="display:inline-block;">
|
227 |
-
CC BY-SA 4.0
|
228 |
-
</a>
|
229 |
-
<img style="display:inline-block;height:22px!important;margin-left:3px;vertical-align:text-bottom;" src="https://mirrors.creativecommons.org/presskit/icons/cc.svg?ref=chooser-v1" alt="">
|
230 |
-
<img style="display:inline-block;height:22px!important;margin-left:3px;vertical-align:text-bottom;" src="https://mirrors.creativecommons.org/presskit/icons/by.svg?ref=chooser-v1" alt="">
|
231 |
-
<img style="display:inline-block;height:22px!important;margin-left:3px;vertical-align:text-bottom;" src="https://mirrors.creativecommons.org/presskit/icons/sa.svg?ref=chooser-v1" alt="">
|
232 |
-
</p>"""
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
electricity_mix.py
DELETED
@@ -1,175 +0,0 @@
|
|
1 |
-
from csv import DictReader
|
2 |
-
import pandas as pd
|
3 |
-
|
4 |
-
PATH = "src/data/electricity_mix.csv"
|
5 |
-
|
6 |
-
COUNTRY_CODES = [
|
7 |
-
("🌎 World", "WOR"),
|
8 |
-
("🇪🇺 Europe", "EEE"),
|
9 |
-
("🇿🇼 Zimbabwe", "ZWE"),
|
10 |
-
("🇿🇲 Zambia", "ZMB"),
|
11 |
-
("🇿🇦 South Africa", "ZAF"),
|
12 |
-
("🇾🇪 Yemen", "YEM"),
|
13 |
-
("🇻🇳 Vietnam", "VNM"),
|
14 |
-
("🇻🇪 Venezuela", "VEN"),
|
15 |
-
("🇺🇿 Uzbekistan", "UZB"),
|
16 |
-
("🇺🇾 Uruguay", "URY"),
|
17 |
-
("🇺🇸 United States", "USA"),
|
18 |
-
("🇺🇦 Ukraine", "UKR"),
|
19 |
-
("🇹🇿 Tanzania", "TZA"),
|
20 |
-
("🇹🇼 Taiwan", "TWN"),
|
21 |
-
("🇹🇹 Trinidad and Tobago", "TTO"),
|
22 |
-
("🇹🇷 Turkey", "TUR"),
|
23 |
-
("🇹🇳 Tunisia", "TUN"),
|
24 |
-
("🇹🇲 Turkmenistan", "TKM"),
|
25 |
-
("🇹🇯 Tajikistan", "TJK"),
|
26 |
-
("🇹🇭 Thailand", "THA"),
|
27 |
-
("🇹🇬 Togo", "TGO"),
|
28 |
-
("🇸🇾 Syrian Arab Republic", "SYR"),
|
29 |
-
("🇸🇻 El Salvador", "SLV"),
|
30 |
-
("🇸🇳 Senegal", "SEN"),
|
31 |
-
("🇸🇰 Slovak Republic", "SVK"),
|
32 |
-
("🇸🇮 Slovenia", "SVN"),
|
33 |
-
("🇸🇬 Singapore", "SGP"),
|
34 |
-
("🇸🇪 Sweden", "SWE"),
|
35 |
-
("🇸🇩 Sudan", "SDN"),
|
36 |
-
("🇸🇦 Saudi Arabia", "SAU"),
|
37 |
-
("🇷🇺 Russian Federation", "RUS"),
|
38 |
-
("🇷🇸 Serbia and Montenegro", "SCG"),
|
39 |
-
("🇷🇴 Romania", "ROU"),
|
40 |
-
("🇶🇦 Qatar", "QAT"),
|
41 |
-
("🇵🇾 Paraguay", "PRY"),
|
42 |
-
("🇵🇹 Portugal", "PRT"),
|
43 |
-
("🇵🇱 Poland", "POL"),
|
44 |
-
("🇵🇰 Pakistan", "PAK"),
|
45 |
-
("🇵🇭 Philippines", "PHL"),
|
46 |
-
("🇵🇪 Peru", "PER"),
|
47 |
-
("🇵🇦 Panama", "PAN"),
|
48 |
-
("🇴🇲 Oman", "OMN"),
|
49 |
-
("🇳🇿 New Zealand", "NZL"),
|
50 |
-
("🇳🇵 Nepal", "NPL"),
|
51 |
-
("🇳🇴 Norway", "NOR"),
|
52 |
-
("🇳🇱 Netherlands", "NLD"),
|
53 |
-
("🇳🇮 Nicaragua", "NIC"),
|
54 |
-
("🇳🇬 Nigeria", "NGA"),
|
55 |
-
("🇳🇦 Namibia", "NAM"),
|
56 |
-
("🇲🇿 Mozambique", "MOZ"),
|
57 |
-
("🇲🇾 Malaysia", "MYS"),
|
58 |
-
("🇲🇽 Mexico", "MEX"),
|
59 |
-
("🇲🇹 Malta", "MLT"),
|
60 |
-
("🇲🇳 Mongolia", "MNG"),
|
61 |
-
("🇲🇲 Myanmar", "MMR"),
|
62 |
-
("🇲🇰 North Macedonia", "MKD"),
|
63 |
-
("🇲🇩 Moldova", "MDA"),
|
64 |
-
("🇲🇦 Morocco", "MAR"),
|
65 |
-
("🇱🇾 Libya", "LBY"),
|
66 |
-
("🇱🇻 Latvia", "LVA"),
|
67 |
-
("🇱🇺 Luxembourg", "LUX"),
|
68 |
-
("🇱🇹 Lithuania", "LTU"),
|
69 |
-
("🇱🇰 Sri Lanka", "LKA"),
|
70 |
-
("🇱🇧 Lebanon", "LBN"),
|
71 |
-
("🇰🇿 Kazakhstan", "KAZ"),
|
72 |
-
("🇰🇼 Kuwait", "KWT"),
|
73 |
-
("🇰🇷 South Korea", "KOR"),
|
74 |
-
("🇰🇵 North Korea", "PRK"),
|
75 |
-
("🇰🇭 Cambodia", "KHM"),
|
76 |
-
("🇰🇬 Kyrgyz Republic", "KGZ"),
|
77 |
-
("🇰🇪 Kenya", "KEN"),
|
78 |
-
("🇯🇵 Japan", "JPN"),
|
79 |
-
("🇯🇴 Jordan", "JOR"),
|
80 |
-
("🇯🇲 Jamaica", "JAM"),
|
81 |
-
("🇮🇹 Italy", "ITA"),
|
82 |
-
("🇮🇸 Iceland", "ISL"),
|
83 |
-
("🇮🇷 Iran", "IRN"),
|
84 |
-
("🇮🇶 Iraq", "IRQ"),
|
85 |
-
("🇮🇳 India", "IND"),
|
86 |
-
("🇮🇱 Israel", "ISR"),
|
87 |
-
("🇮🇪 Ireland", "IRL"),
|
88 |
-
("🇮🇩 Indonesia", "IDN"),
|
89 |
-
("🇭🇺 Hungary", "HUN"),
|
90 |
-
("🇭🇹 Haiti", "HTI"),
|
91 |
-
("🇭🇷 Croatia", "HRV"),
|
92 |
-
("🇭🇳 Honduras", "HND"),
|
93 |
-
("🇭🇰 Hong Kong", "HKG"),
|
94 |
-
("🇬🇹 Guatemala", "GTM"),
|
95 |
-
("🇬🇷 Greece", "GRC"),
|
96 |
-
("🇬🇮 Gibraltar", "GIB"),
|
97 |
-
("🇬🇭 Ghana", "GHA"),
|
98 |
-
("🇬🇪 Georgia", "GEO"),
|
99 |
-
("🇬🇧 United Kingdom", "GBR"),
|
100 |
-
("🇬🇦 Gabon", "GAB"),
|
101 |
-
("🇫🇷 France", "FRA"),
|
102 |
-
("🇫🇮 Finland", "FIN"),
|
103 |
-
("🇪🇹 Ethiopia", "ETH"),
|
104 |
-
("🇪🇸 Spain", "ESP"),
|
105 |
-
("🇪🇷 Eritrea", "ERI"),
|
106 |
-
("🇪🇬 Egypt", "EGY"),
|
107 |
-
("🇪🇪 Estonia", "EST"),
|
108 |
-
("🇪🇨 Ecuador", "ECU"),
|
109 |
-
("🇩🇿 Algeria", "DZA"),
|
110 |
-
("🇩🇴 Dominican Republic", "DOM"),
|
111 |
-
("🇩🇰 Denmark", "DNK"),
|
112 |
-
("🇩🇪 Germany", "DEU"),
|
113 |
-
("🇨🇿 Czech Republic", "CZE"),
|
114 |
-
("🇨🇾 Cyprus", "CYP"),
|
115 |
-
("🇨🇺 Cuba", "CUB"),
|
116 |
-
("🇨🇷 Costa Rica", "CRI"),
|
117 |
-
("🇨🇴 Colombia", "COL"),
|
118 |
-
("🇨🇳 China", "CHN"),
|
119 |
-
("🇨🇲 Cameroon", "CMR"),
|
120 |
-
("🇨🇱 Chile", "CHL"),
|
121 |
-
("🇨🇮 Cote d'Ivoire", "CIV"),
|
122 |
-
("🇨🇭 Switzerland", "CHE"),
|
123 |
-
("🇨🇬 Congo", "COG"),
|
124 |
-
("🇨🇩 Democratic Republic of the Congo", "COD"),
|
125 |
-
("🇨🇦 Canada", "CAN"),
|
126 |
-
("🇧🇾 Belarus", "BLR"),
|
127 |
-
("🇧🇼 Botswana", "BWA"),
|
128 |
-
("🇧🇷 Brazil", "BRA"),
|
129 |
-
("🇧🇴 Bolivia", "BOL"),
|
130 |
-
("🇧🇳 Brunei", "BRN"),
|
131 |
-
("🇧🇯 Benin", "BEN"),
|
132 |
-
("🇧🇭 Bahrain", "BHR"),
|
133 |
-
("🇧🇬 Bulgaria", "BGR"),
|
134 |
-
("🇧🇪 Belgium", "BEL"),
|
135 |
-
("🇧🇩 Bangladesh", "BGD"),
|
136 |
-
("🇧🇦 Bosnia and Herzegovina", "BIH"),
|
137 |
-
("🇦🇿 Azerbaijan", "AZE"),
|
138 |
-
("🇦🇺 Australia", "AUS"),
|
139 |
-
("🇦🇹 Austria", "AUT"),
|
140 |
-
("🇦🇷 Argentina", "ARG"),
|
141 |
-
("🇦🇴 Angola", "AGO"),
|
142 |
-
("🇦 Netherlands Antilles", "ANT"),
|
143 |
-
("🇦🇲 Armenia", "ARM"),
|
144 |
-
("🇦🇱 Albania", "ALB"),
|
145 |
-
("🇦🇪 United Arab Emirates", "ARE")
|
146 |
-
]
|
147 |
-
|
148 |
-
|
149 |
-
def find_electricity_mix(code: str):
|
150 |
-
# TODO: Maybe more optimal to construct database at the beginning of the app
|
151 |
-
# in the same fashion as find_model
|
152 |
-
res = []
|
153 |
-
with open(PATH) as fd:
|
154 |
-
csv = DictReader(fd)
|
155 |
-
for row in csv:
|
156 |
-
res += [float(row[code])]
|
157 |
-
return res
|
158 |
-
|
159 |
-
def dataframe_electricity_mix(countries: list):
|
160 |
-
|
161 |
-
df = pd.read_csv('src/data/electricity_mix.csv')
|
162 |
-
df['name_unit'] = df['name'] + ' (' + df['unit'] + ')'
|
163 |
-
df = df[['name_unit'] + [x[1] for x in COUNTRY_CODES if x[0] in countries]]
|
164 |
-
|
165 |
-
df_melted = df.melt(
|
166 |
-
id_vars=['name_unit'],
|
167 |
-
value_vars=[x[1] for x in COUNTRY_CODES if x[0] in countries],
|
168 |
-
var_name='country',
|
169 |
-
value_name='value')
|
170 |
-
|
171 |
-
df = df_melted.pivot(columns='name_unit',
|
172 |
-
index='country',
|
173 |
-
values='value')
|
174 |
-
|
175 |
-
return df
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
expert.py
DELETED
@@ -1,156 +0,0 @@
|
|
1 |
-
import streamlit as st
|
2 |
-
import pandas as pd
|
3 |
-
from ecologits.impacts.llm import compute_llm_impacts
|
4 |
-
|
5 |
-
from src.utils import format_impacts, average_range_impacts, format_impacts_expert, model_active_params_fn, model_total_params_fn
|
6 |
-
from src.impacts import display_impacts
|
7 |
-
#from src.constants import PROVIDERS, MODELS
|
8 |
-
from src.electricity_mix import COUNTRY_CODES, find_electricity_mix, dataframe_electricity_mix
|
9 |
-
from ecologits.model_repository import models
|
10 |
-
|
11 |
-
import plotly.express as px
|
12 |
-
|
13 |
-
def reset_model():
|
14 |
-
model = 'CUSTOM'
|
15 |
-
|
16 |
-
def expert_mode():
|
17 |
-
|
18 |
-
st.markdown("### 🤓 Expert mode")
|
19 |
-
|
20 |
-
with st.container(border = True):
|
21 |
-
|
22 |
-
########## Model info ##########
|
23 |
-
|
24 |
-
# col1, col2, col3 = st.columns(3)
|
25 |
-
|
26 |
-
# with col1:
|
27 |
-
# provider = st.selectbox(label = 'Provider expert',
|
28 |
-
# options = [x[0] for x in PROVIDERS],
|
29 |
-
# index = 0)
|
30 |
-
# provider = [x[1] for x in PROVIDERS if x[0] == provider][0]
|
31 |
-
# if 'huggingface_hub' in provider:
|
32 |
-
# provider = 'huggingface_hub'
|
33 |
-
|
34 |
-
# with col2:
|
35 |
-
# model = st.selectbox('Model expert', [x[0] for x in MODELS if provider in x[1]])
|
36 |
-
# model = [x[1] for x in MODELS if x[0] == model][0].split('/', 1)[1]
|
37 |
-
|
38 |
-
########## Model parameters ##########
|
39 |
-
|
40 |
-
col11, col22, col33 = st.columns(3)
|
41 |
-
|
42 |
-
with col11:
|
43 |
-
# st.write(provider, model)
|
44 |
-
# st.write(models.find_model(provider, model))
|
45 |
-
# st.write(model_active_params_fn(provider, model, 45))
|
46 |
-
active_params = st.number_input('Active parameters (B)', 0, None, 45)
|
47 |
-
|
48 |
-
with col22:
|
49 |
-
total_params = st.number_input('Total parameters (B)', 0, None, 45)
|
50 |
-
|
51 |
-
with col33:
|
52 |
-
output_tokens = st.number_input('Output completion tokens', 100)
|
53 |
-
|
54 |
-
########## Electricity mix ##########
|
55 |
-
|
56 |
-
location = st.selectbox('Location', [x[0] for x in COUNTRY_CODES])
|
57 |
-
|
58 |
-
col4, col5, col6 = st.columns(3)
|
59 |
-
|
60 |
-
with col4:
|
61 |
-
mix_gwp = st.number_input('Electricity mix - GHG emissions [kgCO2eq / kWh]', find_electricity_mix([x[1] for x in COUNTRY_CODES if x[0] ==location][0])[2], format="%0.6f")
|
62 |
-
#disp_ranges = st.toggle('Display impact ranges', False)
|
63 |
-
with col5:
|
64 |
-
mix_adpe = st.number_input('Electricity mix - Abiotic resources [kgSbeq / kWh]', find_electricity_mix([x[1] for x in COUNTRY_CODES if x[0] ==location][0])[0], format="%0.13f")
|
65 |
-
with col6:
|
66 |
-
mix_pe = st.number_input('Electricity mix - Primary energy [MJ / kWh]', find_electricity_mix([x[1] for x in COUNTRY_CODES if x[0] ==location][0])[1], format="%0.3f")
|
67 |
-
|
68 |
-
impacts = compute_llm_impacts(model_active_parameter_count=active_params,
|
69 |
-
model_total_parameter_count=total_params,
|
70 |
-
output_token_count=output_tokens,
|
71 |
-
request_latency=100000,
|
72 |
-
if_electricity_mix_gwp=mix_gwp,
|
73 |
-
if_electricity_mix_adpe=mix_adpe,
|
74 |
-
if_electricity_mix_pe=mix_pe
|
75 |
-
)
|
76 |
-
|
77 |
-
impacts, usage, embodied = format_impacts(impacts)
|
78 |
-
|
79 |
-
with st.container(border = True):
|
80 |
-
|
81 |
-
st.markdown('<h3 align="center">Environmental Impacts</h2>', unsafe_allow_html = True)
|
82 |
-
|
83 |
-
display_impacts(impacts)
|
84 |
-
|
85 |
-
with st.expander('⚖️ Usage vs Embodied'):
|
86 |
-
|
87 |
-
st.markdown('<h3 align="center">Embodied vs Usage comparison</h2>', unsafe_allow_html = True)
|
88 |
-
|
89 |
-
st.markdown('The usage impacts account for the electricity consumption of the model while the embodied impacts account for resource extraction (e.g., minerals and metals), manufacturing, and transportation of the hardware.')
|
90 |
-
|
91 |
-
col_ghg_comparison, col_adpe_comparison, col_pe_comparison = st.columns(3)
|
92 |
-
|
93 |
-
with col_ghg_comparison:
|
94 |
-
fig_gwp = px.pie(
|
95 |
-
values = [average_range_impacts(usage.gwp.value), average_range_impacts(embodied.gwp.value)],
|
96 |
-
names = ['usage', 'embodied'],
|
97 |
-
title = 'GHG emissions',
|
98 |
-
color_discrete_sequence=["#636EFA", "#00CC96"],
|
99 |
-
width = 100
|
100 |
-
)
|
101 |
-
fig_gwp.update_layout(showlegend=False, title_x=0.5)
|
102 |
-
|
103 |
-
st.plotly_chart(fig_gwp)
|
104 |
-
|
105 |
-
with col_adpe_comparison:
|
106 |
-
fig_adpe = px.pie(
|
107 |
-
values = [average_range_impacts(usage.adpe.value), average_range_impacts(embodied.adpe.value)],
|
108 |
-
names = ['usage', 'embodied'],
|
109 |
-
title = 'Abiotic depletion',
|
110 |
-
color_discrete_sequence=["#00CC96","#636EFA"],
|
111 |
-
width = 100)
|
112 |
-
fig_adpe.update_layout(
|
113 |
-
showlegend=True,
|
114 |
-
legend=dict(yanchor="bottom", x = 0.35, y = -0.1),
|
115 |
-
title_x=0.5)
|
116 |
-
|
117 |
-
st.plotly_chart(fig_adpe)
|
118 |
-
|
119 |
-
with col_pe_comparison:
|
120 |
-
fig_pe = px.pie(
|
121 |
-
values = [average_range_impacts(usage.pe.value), average_range_impacts(embodied.pe.value)],
|
122 |
-
names = ['usage', 'embodied'],
|
123 |
-
title = 'Primary energy',
|
124 |
-
color_discrete_sequence=["#636EFA", "#00CC96"],
|
125 |
-
width = 100)
|
126 |
-
fig_pe.update_layout(showlegend=False, title_x=0.5)
|
127 |
-
|
128 |
-
st.plotly_chart(fig_pe)
|
129 |
-
|
130 |
-
with st.expander('🌍️ Location impact'):
|
131 |
-
|
132 |
-
st.markdown('<h4 align="center">How can location impact the footprint ?</h4>', unsafe_allow_html = True)
|
133 |
-
|
134 |
-
countries_to_compare = st.multiselect(
|
135 |
-
label = 'Countries to compare',
|
136 |
-
options = [x[0] for x in COUNTRY_CODES],
|
137 |
-
default = ["🇫🇷 France", "🇺🇸 United States", "🇨🇳 China"]
|
138 |
-
)
|
139 |
-
|
140 |
-
try:
|
141 |
-
|
142 |
-
df = dataframe_electricity_mix(countries_to_compare)
|
143 |
-
|
144 |
-
impact_type = st.selectbox(
|
145 |
-
label='Select an impact type to compare',
|
146 |
-
options=[x for x in df.columns if x!='country'],
|
147 |
-
index=1)
|
148 |
-
|
149 |
-
df.sort_values(by = impact_type, inplace = True)
|
150 |
-
|
151 |
-
fig_2 = px.bar(df, x = df.index, y = impact_type, text = impact_type, color = impact_type)
|
152 |
-
st.plotly_chart(fig_2)
|
153 |
-
|
154 |
-
except:
|
155 |
-
|
156 |
-
st.warning("Can't display chart with no values.")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
impacts.py
DELETED
@@ -1,116 +0,0 @@
|
|
1 |
-
import streamlit as st
|
2 |
-
import ecologits
|
3 |
-
from src.utils import (
|
4 |
-
format_energy_eq_electric_vehicle,
|
5 |
-
format_energy_eq_electricity_consumption_ireland,
|
6 |
-
format_energy_eq_electricity_production,
|
7 |
-
format_energy_eq_physical_activity,
|
8 |
-
format_gwp_eq_airplane_paris_nyc,
|
9 |
-
format_gwp_eq_streaming,
|
10 |
-
PhysicalActivity,
|
11 |
-
EnergyProduction
|
12 |
-
)
|
13 |
-
|
14 |
-
############################################################################################################
|
15 |
-
|
16 |
-
def get_impacts(model, active_params, total_params, mix_ghg, mix_adpe, mix_pe):
|
17 |
-
|
18 |
-
return 1
|
19 |
-
|
20 |
-
############################################################################################################
|
21 |
-
|
22 |
-
|
23 |
-
def display_impacts(impacts):
|
24 |
-
|
25 |
-
st.divider()
|
26 |
-
|
27 |
-
col_energy, col_ghg, col_adpe, col_pe, col_water = st.columns(5)
|
28 |
-
|
29 |
-
with col_energy:
|
30 |
-
st.markdown('<h4 align="center">⚡️ Energy</h4>', unsafe_allow_html = True)
|
31 |
-
st.latex(f'\Large {impacts.energy.magnitude:.3g} \ \large {impacts.energy.units}')
|
32 |
-
st.markdown(f'<p align="center"><i>Evaluates the electricity consumption<i></p>', unsafe_allow_html = True)
|
33 |
-
|
34 |
-
with col_ghg:
|
35 |
-
st.markdown('<h4 align="center">🌍️ GHG Emissions</h4>', unsafe_allow_html = True)
|
36 |
-
st.latex(f'\Large {impacts.gwp.magnitude:.3g} \ \large {impacts.gwp.units}')
|
37 |
-
st.markdown(f'<p align="center"><i>Evaluates the effect on global warming<i></p>', unsafe_allow_html = True)
|
38 |
-
|
39 |
-
with col_adpe:
|
40 |
-
st.markdown('<h4 align="center">🪨 Abiotic Resources</h4>', unsafe_allow_html = True)
|
41 |
-
st.latex(f'\Large {impacts.adpe.magnitude:.3g} \ \large {impacts.adpe.units}')
|
42 |
-
st.markdown(f'<p align="center"><i>Evaluates the use of metals and minerals<i></p>', unsafe_allow_html = True)
|
43 |
-
|
44 |
-
with col_pe:
|
45 |
-
st.markdown('<h4 align="center">⛽️ Primary Energy</h4>', unsafe_allow_html = True)
|
46 |
-
st.latex(f'\Large {impacts.pe.magnitude:.3g} \ \large {impacts.pe.units}')
|
47 |
-
st.markdown(f'<p align="center"><i>Evaluates the use of energy resources<i></p>', unsafe_allow_html = True)
|
48 |
-
|
49 |
-
with col_water:
|
50 |
-
st.markdown('<h4 align="center">🚰 Water</h4>', unsafe_allow_html = True)
|
51 |
-
st.latex('\Large Upcoming...')
|
52 |
-
st.markdown(f'<p align="center"><i>Evaluates the use of water<i></p>', unsafe_allow_html = True)
|
53 |
-
|
54 |
-
############################################################################################################
|
55 |
-
|
56 |
-
def display_equivalent(impacts):
|
57 |
-
|
58 |
-
st.divider()
|
59 |
-
|
60 |
-
ev_eq = format_energy_eq_electric_vehicle(impacts.energy)
|
61 |
-
|
62 |
-
streaming_eq = format_gwp_eq_streaming(impacts.gwp)
|
63 |
-
|
64 |
-
col1, col2, col3 = st.columns(3)
|
65 |
-
|
66 |
-
with col1:
|
67 |
-
physical_activity, distance = format_energy_eq_physical_activity(impacts.energy)
|
68 |
-
if physical_activity == PhysicalActivity.WALKING:
|
69 |
-
physical_activity = "🚶 " + physical_activity.capitalize()
|
70 |
-
if physical_activity == PhysicalActivity.RUNNING:
|
71 |
-
physical_activity = "🏃 " + physical_activity.capitalize()
|
72 |
-
|
73 |
-
st.markdown(f'<h4 align="center">{physical_activity}</h4>', unsafe_allow_html = True)
|
74 |
-
st.latex(f'\Large {distance.magnitude:.3g} \ \large {distance.units}')
|
75 |
-
st.markdown(f'<p align="center"><i>Based on energy consumption<i></p>', unsafe_allow_html = True)
|
76 |
-
|
77 |
-
with col2:
|
78 |
-
ev_eq = format_energy_eq_electric_vehicle(impacts.energy)
|
79 |
-
st.markdown(f'<h4 align="center">🔋 Electric Vehicle</h4>', unsafe_allow_html = True)
|
80 |
-
st.latex(f'\Large {ev_eq.magnitude:.3g} \ \large {ev_eq.units}')
|
81 |
-
st.markdown(f'<p align="center"><i>Based on energy consumption<i></p>', unsafe_allow_html = True)
|
82 |
-
|
83 |
-
with col3:
|
84 |
-
streaming_eq = format_gwp_eq_streaming(impacts.gwp)
|
85 |
-
st.markdown(f'<h4 align="center">⏯️ Streaming</h4>', unsafe_allow_html = True)
|
86 |
-
st.latex(f'\Large {streaming_eq.magnitude:.3g} \ \large {streaming_eq.units}')
|
87 |
-
st.markdown(f'<p align="center"><i>Based on GHG emissions<i></p>', unsafe_allow_html = True)
|
88 |
-
|
89 |
-
st.divider()
|
90 |
-
|
91 |
-
st.markdown('<h3 align="center">What if 1% of the planet does this request everyday for 1 year ?</h3>', unsafe_allow_html = True)
|
92 |
-
st.markdown('<p align="center">If this use case is largely deployed around the world, the equivalent impacts would be the impacts of this request x 1% of 8 billion people x 365 days in a year.</p>', unsafe_allow_html = True)
|
93 |
-
|
94 |
-
col4, col5, col6 = st.columns(3)
|
95 |
-
|
96 |
-
with col4:
|
97 |
-
|
98 |
-
electricity_production, count = format_energy_eq_electricity_production(impacts.energy)
|
99 |
-
if electricity_production == EnergyProduction.NUCLEAR:
|
100 |
-
emoji = "☢️"
|
101 |
-
name = "Nuclear power plants"
|
102 |
-
if electricity_production == EnergyProduction.WIND:
|
103 |
-
emoji = "💨️ "
|
104 |
-
name = "Wind turbines"
|
105 |
-
st.markdown(f'<h4 align="center">{emoji} {count.magnitude:.0f} {name} (yearly)</h4>', unsafe_allow_html = True)
|
106 |
-
st.markdown(f'<p align="center"><i>Based on energy consumption<i></p>', unsafe_allow_html = True)
|
107 |
-
|
108 |
-
with col5:
|
109 |
-
ireland_count = format_energy_eq_electricity_consumption_ireland(impacts.energy)
|
110 |
-
st.markdown(f'<h4 align="center">🇮🇪 {ireland_count.magnitude:.3f} x Ireland <span style="font-size: 12px">(yearly ⚡️ cons.)</span></h2></h4>', unsafe_allow_html = True)
|
111 |
-
st.markdown(f'<p align="center"><i>Based on energy consumption<i></p>', unsafe_allow_html = True)
|
112 |
-
|
113 |
-
with col6:
|
114 |
-
paris_nyc_airplane = format_gwp_eq_airplane_paris_nyc(impacts.gwp)
|
115 |
-
st.markdown(f'<h4 align="center">✈️ {round(paris_nyc_airplane.magnitude):,} Paris ↔ NYC</h4>', unsafe_allow_html = True)
|
116 |
-
st.markdown(f'<p align="center"><i>Based on GHG emissions<i></p>', unsafe_allow_html = True)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
models.py
DELETED
@@ -1,46 +0,0 @@
|
|
1 |
-
import requests
|
2 |
-
import json
|
3 |
-
import pandas as pd
|
4 |
-
from src.constants import MODEL_REPOSITORY_URL
|
5 |
-
|
6 |
-
def clean_models_data(df):
|
7 |
-
|
8 |
-
dict_providers = {
|
9 |
-
'google': 'Google',
|
10 |
-
'mistralai': 'MistralAI',
|
11 |
-
'meta-llama': 'Meta',
|
12 |
-
'openai': 'OpenAI',
|
13 |
-
'anthropic': 'Anthropic',
|
14 |
-
'cohere': 'Cohere',
|
15 |
-
'microsoft': 'Microsoft',
|
16 |
-
'mistral-community': 'Mistral Community',
|
17 |
-
'databricks': 'Databricks'
|
18 |
-
}
|
19 |
-
|
20 |
-
df.drop('type', axis=1, inplace=True)
|
21 |
-
|
22 |
-
df.loc[df['name'].str.contains('/'), 'name_clean'] = df.loc[df['name'].str.contains('/'), 'name'].str.split('/').str[1]
|
23 |
-
df['name_clean'] = df['name_clean'].fillna(df['name'])
|
24 |
-
df['name_clean'] = df['name_clean'].replace({'-': ' '}, regex = True)
|
25 |
-
|
26 |
-
df.loc[df['provider'] == 'huggingface_hub', 'provider_clean'] = df.loc[df['provider'] == 'huggingface_hub', 'name'].str.split('/').str[0]
|
27 |
-
df['provider_clean'] = df['provider_clean'].fillna(df['provider'])
|
28 |
-
df['provider_clean'] = df['provider_clean'].replace(dict_providers, regex = True)
|
29 |
-
|
30 |
-
df['architecture_type'] = df['architecture'].apply(lambda x: x['type'])
|
31 |
-
df['architecture_parameters'] = df['architecture'].apply(lambda x: x['parameters'])
|
32 |
-
|
33 |
-
df['warnings'] = df['warnings'].apply(lambda x: ', '.join(x) if x else None).fillna('none')
|
34 |
-
df['warning_arch'] = df['warnings'].apply(lambda x: 'model-arch-not-released' in x)
|
35 |
-
df['warning_multi_modal'] = df['warnings'].apply(lambda x: 'model-arch-multimodal' in x)
|
36 |
-
|
37 |
-
return df[['provider', 'provider_clean', 'name', 'name_clean', 'architecture_type', 'architecture_parameters', 'warning_arch', 'warning_multi_modal']]
|
38 |
-
|
39 |
-
def load_models():
|
40 |
-
|
41 |
-
resp = requests.get(MODEL_REPOSITORY_URL)
|
42 |
-
data = json.loads(resp.text)
|
43 |
-
df = pd.DataFrame(data['models'])
|
44 |
-
|
45 |
-
return clean_models_data(df)
|
46 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
src/__pycache__/__init__.cpython-312.pyc
CHANGED
Binary files a/src/__pycache__/__init__.cpython-312.pyc and b/src/__pycache__/__init__.cpython-312.pyc differ
|
|
src/__pycache__/calculator.cpython-312.pyc
CHANGED
Binary files a/src/__pycache__/calculator.cpython-312.pyc and b/src/__pycache__/calculator.cpython-312.pyc differ
|
|
src/__pycache__/constants.cpython-312.pyc
CHANGED
Binary files a/src/__pycache__/constants.cpython-312.pyc and b/src/__pycache__/constants.cpython-312.pyc differ
|
|
src/__pycache__/content.cpython-312.pyc
CHANGED
Binary files a/src/__pycache__/content.cpython-312.pyc and b/src/__pycache__/content.cpython-312.pyc differ
|
|
src/__pycache__/electricity_mix.cpython-312.pyc
CHANGED
Binary files a/src/__pycache__/electricity_mix.cpython-312.pyc and b/src/__pycache__/electricity_mix.cpython-312.pyc differ
|
|
src/__pycache__/expert.cpython-312.pyc
CHANGED
Binary files a/src/__pycache__/expert.cpython-312.pyc and b/src/__pycache__/expert.cpython-312.pyc differ
|
|
src/__pycache__/impacts.cpython-312.pyc
CHANGED
Binary files a/src/__pycache__/impacts.cpython-312.pyc and b/src/__pycache__/impacts.cpython-312.pyc differ
|
|
src/__pycache__/models.cpython-312.pyc
CHANGED
Binary files a/src/__pycache__/models.cpython-312.pyc and b/src/__pycache__/models.cpython-312.pyc differ
|
|
src/__pycache__/utils.cpython-312.pyc
CHANGED
Binary files a/src/__pycache__/utils.cpython-312.pyc and b/src/__pycache__/utils.cpython-312.pyc differ
|
|
src/calculator.py
CHANGED
@@ -6,6 +6,7 @@ from src.impacts import get_impacts, display_impacts, display_equivalent
|
|
6 |
from src.utils import format_impacts
|
7 |
from src.content import WARNING_CLOSED_SOURCE, WARNING_MULTI_MODAL, WARNING_BOTH
|
8 |
from src.models import load_models, clean_models_data
|
|
|
9 |
|
10 |
from src.constants import PROMPTS
|
11 |
|
@@ -13,25 +14,30 @@ def calculator_mode():
|
|
13 |
|
14 |
with st.container(border=True):
|
15 |
|
16 |
-
df = load_models()
|
17 |
|
18 |
col1, col2, col3 = st.columns(3)
|
19 |
|
20 |
with col1:
|
21 |
-
provider = st.selectbox(
|
22 |
-
|
23 |
-
|
24 |
-
|
|
|
25 |
|
26 |
with col2:
|
27 |
-
model = st.selectbox(
|
28 |
-
|
|
|
|
|
29 |
|
30 |
with col3:
|
31 |
output_tokens = st.selectbox('Example prompt', [x[0] for x in PROMPTS])
|
32 |
|
33 |
# WARNING DISPLAY
|
34 |
-
|
|
|
|
|
35 |
df_filtered = df[(df['provider_clean'] == provider) & (df['name_clean'] == model)]
|
36 |
|
37 |
if df_filtered['warning_arch'].values[0] and not df_filtered['warning_multi_modal'].values[0]:
|
@@ -42,6 +48,7 @@ def calculator_mode():
|
|
42 |
st.warning(WARNING_BOTH)
|
43 |
|
44 |
try:
|
|
|
45 |
impacts = llm_impacts(
|
46 |
provider=provider_raw,
|
47 |
model_name=model_raw,
|
|
|
6 |
from src.utils import format_impacts
|
7 |
from src.content import WARNING_CLOSED_SOURCE, WARNING_MULTI_MODAL, WARNING_BOTH
|
8 |
from src.models import load_models, clean_models_data
|
9 |
+
from src.constants import MAIN_MODELS
|
10 |
|
11 |
from src.constants import PROMPTS
|
12 |
|
|
|
14 |
|
15 |
with st.container(border=True):
|
16 |
|
17 |
+
df = load_models(filter_main=True)
|
18 |
|
19 |
col1, col2, col3 = st.columns(3)
|
20 |
|
21 |
with col1:
|
22 |
+
provider = st.selectbox(
|
23 |
+
label = 'Provider',
|
24 |
+
options = [x for x in df['provider_clean'].unique()],
|
25 |
+
index = 7
|
26 |
+
)
|
27 |
|
28 |
with col2:
|
29 |
+
model = st.selectbox(
|
30 |
+
label = 'Model',
|
31 |
+
options = [x for x in df['name_clean'].unique() if x in df[df['provider_clean'] == provider]['name_clean'].unique()]
|
32 |
+
)
|
33 |
|
34 |
with col3:
|
35 |
output_tokens = st.selectbox('Example prompt', [x[0] for x in PROMPTS])
|
36 |
|
37 |
# WARNING DISPLAY
|
38 |
+
provider_raw = df[(df['provider_clean'] == provider) & (df['name_clean'] == model)]['provider'].values[0]
|
39 |
+
model_raw = df[(df['provider_clean'] == provider) & (df['name_clean'] == model)]['name'].values[0]
|
40 |
+
|
41 |
df_filtered = df[(df['provider_clean'] == provider) & (df['name_clean'] == model)]
|
42 |
|
43 |
if df_filtered['warning_arch'].values[0] and not df_filtered['warning_multi_modal'].values[0]:
|
|
|
48 |
st.warning(WARNING_BOTH)
|
49 |
|
50 |
try:
|
51 |
+
|
52 |
impacts = llm_impacts(
|
53 |
provider=provider_raw,
|
54 |
model_name=model_raw,
|
src/constants.py
CHANGED
@@ -1,106 +1,102 @@
|
|
1 |
-
|
2 |
-
("
|
3 |
-
("
|
4 |
-
("
|
5 |
-
("
|
6 |
-
("
|
|
|
7 |
]
|
|
|
|
|
|
|
8 |
|
9 |
-
|
10 |
-
|
11 |
-
|
12 |
-
|
13 |
-
|
|
|
|
|
|
|
|
|
14 |
]
|
15 |
|
16 |
-
|
17 |
-
|
18 |
-
|
19 |
-
|
20 |
-
|
21 |
-
|
22 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
23 |
]
|
24 |
|
25 |
-
|
26 |
-
|
27 |
-
|
28 |
-
|
29 |
-
|
|
|
30 |
]
|
31 |
|
32 |
-
|
33 |
-
|
34 |
-
|
35 |
-
|
36 |
-
|
37 |
-
|
|
|
|
|
|
|
38 |
]
|
39 |
|
40 |
-
|
41 |
-
|
42 |
-
|
43 |
-
|
44 |
-
|
45 |
-
|
46 |
-
|
47 |
-
("Large", "mistral-large-2402"),
|
48 |
]
|
49 |
|
50 |
-
|
51 |
-
|
52 |
-
|
53 |
-
|
54 |
-
|
55 |
-
|
56 |
-
|
|
|
|
|
|
|
57 |
]
|
58 |
-
PROMPTS = [(s + f" ({v} output tokens)", v) for (s, v) in PROMPTS]
|
59 |
|
60 |
-
|
61 |
-
|
62 |
-
|
63 |
-
|
64 |
-
|
65 |
-
|
66 |
-
|
67 |
-
"anthropic/claude-3-haiku-20240307",
|
68 |
-
"anthropic/claude-2.1",
|
69 |
-
"anthropic/claude-2.0",
|
70 |
-
"anthropic/claude-instant-1.2",
|
71 |
-
"mistralai/mistral-tiny-2312",
|
72 |
-
"mistralai/mistral-small-2402",
|
73 |
-
"mistralai/mistral-medium-2312",
|
74 |
-
"mistralai/mistral-large-2402",
|
75 |
-
}
|
76 |
|
77 |
-
|
78 |
-
|
79 |
-
|
80 |
-
|
81 |
-
|
82 |
-
|
83 |
-
|
84 |
-
|
85 |
-
|
86 |
-
|
87 |
-
|
88 |
-
|
89 |
-
("Mistral AI / Mixtral 8x7B", "mistralai/open-mixtral-8x7b"),
|
90 |
-
("Mistral AI / Mixtral 8x22B", "mistralai/open-mixtral-8x22b"),
|
91 |
-
("Mistral AI / Tiny", "mistralai/mistral-tiny-2312"),
|
92 |
-
("Mistral AI / Small", "mistralai/mistral-small-2402"),
|
93 |
-
("Mistral AI / Medium", "mistralai/mistral-medium-2312"),
|
94 |
-
("Mistral AI / Large", "mistralai/mistral-large-2402"),
|
95 |
-
("Meta / Llama 3 8B", "huggingface_hub/meta-llama/Meta-Llama-3-8B"),
|
96 |
-
("Meta / Llama 3 70B", "huggingface_hub/meta-llama/Meta-Llama-3-70B"),
|
97 |
-
("Meta / Llama 2 7B", "huggingface_hub/meta-llama/Llama-2-7b-hf"),
|
98 |
-
("Meta / Llama 2 13B", "huggingface_hub/meta-llama/Llama-2-13b-hf"),
|
99 |
-
("Meta / Llama 2 70B", "huggingface_hub/meta-llama/Llama-2-70b-hf"),
|
100 |
-
("Cohere / Command Light", "cohere/command-light"),
|
101 |
-
("Cohere / Command", "cohere/command"),
|
102 |
-
("Cohere / Command R", "cohere/command-r"),
|
103 |
-
("Cohere / Command R+", "cohere/command-r-plus"),
|
104 |
]
|
105 |
|
106 |
-
|
|
|
1 |
+
PROMPTS = [
|
2 |
+
("Write a Tweet", 50),
|
3 |
+
("Write an email", 170),
|
4 |
+
("Write an article summary", 250),
|
5 |
+
("Small conversation with a chatbot", 400),
|
6 |
+
("Write a report of 5 pages", 5000),
|
7 |
+
("Write the code for this app", 15000)
|
8 |
]
|
9 |
+
PROMPTS = [(s + f" ({v} output tokens)", v) for (s, v) in PROMPTS]
|
10 |
+
|
11 |
+
MODEL_REPOSITORY_URL = "https://raw.githubusercontent.com/genai-impact/ecologits/refs/heads/main/ecologits/data/models.json"
|
12 |
|
13 |
+
main_models_openai = [
|
14 |
+
'chatgpt-4o-latest',
|
15 |
+
'gpt-3.5-turbo',
|
16 |
+
'gpt-4',
|
17 |
+
'gpt-4-turbo',
|
18 |
+
'gpt-4o',
|
19 |
+
'gpt-4o-mini',
|
20 |
+
'o1',
|
21 |
+
'o1-mini'
|
22 |
]
|
23 |
|
24 |
+
main_models_meta = [
|
25 |
+
'meta-llama/Meta-Llama-3.1-8B',
|
26 |
+
'meta-llama/Meta-Llama-3.1-70B',
|
27 |
+
'meta-llama/Meta-Llama-3.1-405B',
|
28 |
+
'meta-llama/Meta-Llama-3-8B',
|
29 |
+
'meta-llama/Meta-Llama-3-70B',
|
30 |
+
'meta-llama/Meta-Llama-3-70B',
|
31 |
+
'meta-llama/Llama-2-7b',
|
32 |
+
'meta-llama/Llama-2-13b',
|
33 |
+
'meta-llama/Llama-2-70b',
|
34 |
+
'meta-llama/CodeLlama-7b-hf',
|
35 |
+
'meta-llama/CodeLlama-13b-hf',
|
36 |
+
'meta-llama/CodeLlama-34b-hf',
|
37 |
+
'meta-llama/CodeLlama-70b-hf'
|
38 |
]
|
39 |
|
40 |
+
main_models_msft = [
|
41 |
+
'microsoft/phi-1',
|
42 |
+
'microsoft/phi-1_5',
|
43 |
+
'microsoft/Phi-3-mini-128k-instruct',
|
44 |
+
'microsoft/Phi-3-small-128k-instruct',
|
45 |
+
'microsoft/Phi-3-medium-128k-instruct',
|
46 |
]
|
47 |
|
48 |
+
main_models_anthropic = [
|
49 |
+
'claude-2.0',
|
50 |
+
'claude-2.1',
|
51 |
+
'claude-3-5-haiku-latest',
|
52 |
+
'claude-3-5-sonnet-latest',
|
53 |
+
'claude-3-7-sonnet-latest',
|
54 |
+
'claude-3-haiku-20240307',
|
55 |
+
'claude-3-opus-latest',
|
56 |
+
'claude-3-sonnet-20240229'
|
57 |
]
|
58 |
|
59 |
+
main_models_cohere = [
|
60 |
+
'c4ai-aya-expanse-8b',
|
61 |
+
'c4ai-aya-expanse-32b',
|
62 |
+
'command',
|
63 |
+
'command-light',
|
64 |
+
'command-r',
|
65 |
+
'command-r-plus',
|
|
|
66 |
]
|
67 |
|
68 |
+
main_models_google = [
|
69 |
+
'google/gemma-2-2b',
|
70 |
+
'google/gemma-2-9b',
|
71 |
+
'google/gemma-2-27b',
|
72 |
+
'google/codegemma-2b',
|
73 |
+
'google/codegemma-7b',
|
74 |
+
'gemini-1.0-pro',
|
75 |
+
'gemini-1.5-pro',
|
76 |
+
'gemini-1.5-flash',
|
77 |
+
'gemini-2.0-flash'
|
78 |
]
|
|
|
79 |
|
80 |
+
main_models_databricks = [
|
81 |
+
'databricks/dolly-v1-6b',
|
82 |
+
'databricks/dolly-v2-12b',
|
83 |
+
'databricks/dolly-v2-7b',
|
84 |
+
'databricks/dolly-v2-3b',
|
85 |
+
'databricks/dbrx-base'
|
86 |
+
]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
87 |
|
88 |
+
main_models_mistral = [
|
89 |
+
'mistralai/Mistral-7B-v0.3',
|
90 |
+
'mistralai/Mixtral-8x7B-v0.1',
|
91 |
+
'mistralai/Mixtral-8x22B-v0.1',
|
92 |
+
'mistralai/Codestral-22B-v0.1',
|
93 |
+
'mistralai/Mathstral-7B-v0.1',
|
94 |
+
'ministral-3b-latest',
|
95 |
+
'ministral-8b-latest',
|
96 |
+
'mistral-tiny',
|
97 |
+
'mistral-small',
|
98 |
+
'mistral-medium',
|
99 |
+
'mistral-large-latest'
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
100 |
]
|
101 |
|
102 |
+
MAIN_MODELS = main_models_meta + main_models_openai + main_models_anthropic + main_models_cohere + main_models_msft + main_models_mistral + main_models_databricks + main_models_google
|
src/models.py
CHANGED
@@ -1,9 +1,10 @@
|
|
1 |
import requests
|
2 |
import json
|
3 |
import pandas as pd
|
4 |
-
from src.constants import MODEL_REPOSITORY_URL
|
|
|
5 |
|
6 |
-
def clean_models_data(df):
|
7 |
|
8 |
dict_providers = {
|
9 |
'google': 'Google',
|
@@ -16,12 +17,14 @@ def clean_models_data(df):
|
|
16 |
'mistral-community': 'Mistral Community',
|
17 |
'databricks': 'Databricks'
|
18 |
}
|
|
|
|
|
19 |
|
20 |
df.drop('type', axis=1, inplace=True)
|
21 |
|
22 |
df.loc[df['name'].str.contains('/'), 'name_clean'] = df.loc[df['name'].str.contains('/'), 'name'].str.split('/').str[1]
|
23 |
df['name_clean'] = df['name_clean'].fillna(df['name'])
|
24 |
-
df['name_clean'] = df['name_clean'].replace({'-': ' '}, regex = True)
|
25 |
|
26 |
df.loc[df['provider'] == 'huggingface_hub', 'provider_clean'] = df.loc[df['provider'] == 'huggingface_hub', 'name'].str.split('/').str[0]
|
27 |
df['provider_clean'] = df['provider_clean'].fillna(df['provider'])
|
@@ -33,14 +36,18 @@ def clean_models_data(df):
|
|
33 |
df['warnings'] = df['warnings'].apply(lambda x: ', '.join(x) if x else None).fillna('none')
|
34 |
df['warning_arch'] = df['warnings'].apply(lambda x: 'model-arch-not-released' in x)
|
35 |
df['warning_multi_modal'] = df['warnings'].apply(lambda x: 'model-arch-multimodal' in x)
|
|
|
|
|
|
|
36 |
|
37 |
return df[['provider', 'provider_clean', 'name', 'name_clean', 'architecture_type', 'architecture_parameters', 'warning_arch', 'warning_multi_modal']]
|
38 |
|
39 |
-
|
|
|
40 |
|
41 |
resp = requests.get(MODEL_REPOSITORY_URL)
|
42 |
data = json.loads(resp.text)
|
43 |
df = pd.DataFrame(data['models'])
|
44 |
|
45 |
-
return clean_models_data(df)
|
46 |
|
|
|
1 |
import requests
|
2 |
import json
|
3 |
import pandas as pd
|
4 |
+
from src.constants import MODEL_REPOSITORY_URL, MAIN_MODELS
|
5 |
+
import streamlit as st
|
6 |
|
7 |
+
def clean_models_data(df, with_filter = True):
|
8 |
|
9 |
dict_providers = {
|
10 |
'google': 'Google',
|
|
|
17 |
'mistral-community': 'Mistral Community',
|
18 |
'databricks': 'Databricks'
|
19 |
}
|
20 |
+
|
21 |
+
models_to_keep = MAIN_MODELS
|
22 |
|
23 |
df.drop('type', axis=1, inplace=True)
|
24 |
|
25 |
df.loc[df['name'].str.contains('/'), 'name_clean'] = df.loc[df['name'].str.contains('/'), 'name'].str.split('/').str[1]
|
26 |
df['name_clean'] = df['name_clean'].fillna(df['name'])
|
27 |
+
df['name_clean'] = df['name_clean'].replace({'-': ' ', 'latest': ''}, regex = True)
|
28 |
|
29 |
df.loc[df['provider'] == 'huggingface_hub', 'provider_clean'] = df.loc[df['provider'] == 'huggingface_hub', 'name'].str.split('/').str[0]
|
30 |
df['provider_clean'] = df['provider_clean'].fillna(df['provider'])
|
|
|
36 |
df['warnings'] = df['warnings'].apply(lambda x: ', '.join(x) if x else None).fillna('none')
|
37 |
df['warning_arch'] = df['warnings'].apply(lambda x: 'model-arch-not-released' in x)
|
38 |
df['warning_multi_modal'] = df['warnings'].apply(lambda x: 'model-arch-multimodal' in x)
|
39 |
+
|
40 |
+
if with_filter == True:
|
41 |
+
df = df[df['name'].isin(models_to_keep)]
|
42 |
|
43 |
return df[['provider', 'provider_clean', 'name', 'name_clean', 'architecture_type', 'architecture_parameters', 'warning_arch', 'warning_multi_modal']]
|
44 |
|
45 |
+
@st.cache_data
|
46 |
+
def load_models(filter_main = True):
|
47 |
|
48 |
resp = requests.get(MODEL_REPOSITORY_URL)
|
49 |
data = json.loads(resp.text)
|
50 |
df = pd.DataFrame(data['models'])
|
51 |
|
52 |
+
return clean_models_data(df, filter_main)
|
53 |
|
utils.py
DELETED
@@ -1,262 +0,0 @@
|
|
1 |
-
from dataclasses import dataclass
|
2 |
-
from enum import Enum
|
3 |
-
|
4 |
-
import pandas as pd
|
5 |
-
from ecologits.model_repository import models
|
6 |
-
from ecologits.impacts.modeling import Impacts, Energy, GWP, ADPe, PE
|
7 |
-
#from ecologits.tracers.utils import llm_impacts
|
8 |
-
from pint import UnitRegistry, Quantity
|
9 |
-
|
10 |
-
#####################################################################################
### UNITS DEFINITION
#####################################################################################

# Single shared pint registry for the whole module; every Quantity below is
# built through it so all values stay inter-convertible.
u = UnitRegistry()
# Electricity / energy units.
u.define('Wh = watt_hour')
u.define('kWh = kilowatt_hour')
u.define('MWh = megawatt_hour')
u.define('GWh = gigawatt_hour')
u.define('TWh = terawatt_hour')
# Greenhouse-gas mass units (CO2 equivalent).
u.define('gCO2eq = gram')
u.define('kgCO2eq = kilogram')
u.define('tCO2eq = metricton')
# Abiotic resource depletion unit (antimony equivalent).
u.define('kgSbeq = kilogram')
# Primary-energy units.
u.define('kJ = kilojoule')
u.define('MJ = megajoule')
# Distance and time units used by the "equivalent" formatters below.
u.define('m = meter')
u.define('km = kilometer')
u.define('s = second')
u.define('min = minute')
u.define('h = hour')
# Shorthand constructor: q("1 kWh") or q(value, unit).
q = u.Quantity
|
32 |
-
|
33 |
-
@dataclass
class QImpacts:
    """Environmental impacts expressed as pint Quantities, ready for display."""

    # Final electricity consumption.
    energy: Quantity
    # Global warming potential (CO2 equivalent).
    gwp: Quantity
    # Abiotic depletion potential, elements (antimony equivalent).
    adpe: Quantity
    # Primary energy.
    pe: Quantity
|
39 |
-
|
40 |
-
|
41 |
-
class PhysicalActivity(str, Enum):
    """Physical activities used to express an energy amount as a distance."""

    RUNNING = "running"
    WALKING = "walking"
|
44 |
-
|
45 |
-
|
46 |
-
class EnergyProduction(str, Enum):
    """Electricity-production sources used for yearly-output comparisons."""

    NUCLEAR = "nuclear"
    WIND = "wind"
|
49 |
-
|
50 |
-
|
51 |
-
# Per-country tuples: (name, number, number).
# NOTE(review): the meaning of the two numeric fields is not stated here —
# presumably a percentage and a yearly electricity figure; confirm against
# the code that consumes COUNTRIES.
# NOTE(review): "comoros" and "samoa" carry identical values (100, 821_632),
# which looks like a copy-paste — verify against the source data.
COUNTRIES = [
    ("cook_islands", 38.81, 9_556),
    ("tonga", 51.15, 104_490),
    ("comoros", 100, 821_632),
    ("samoa", 100, 821_632),
]
|
57 |
-
|
58 |
-
#####################################################################################
### EQUIVALENT RAW DATA
#####################################################################################

# From https://www.runningtools.com/energyusage.htm
RUNNING_ENERGY_EQ = q("294 kJ / km")  # running 1 km at 10 km/h with a weight of 70 kg
WALKING_ENERGY_EQ = q("196 kJ / km")  # walking 1 km at 3 km/h with a weight of 70 kg

# Electric-vehicle consumption per kilometer.
# From https://selectra.info/energie/actualites/insolite/consommation-vehicules-electriques-france-2040
# and https://www.tesla.com/fr_fr/support/power-consumption
EV_ENERGY_EQ = q("0.17 kWh / km")

# Hours of video streaming per kgCO2eq (multiply a GWP amount by this).
# From https://impactco2.fr/outils/comparateur?value=1&comparisons=streamingvideo
STREAMING_GWP_EQ = q("15.6 h / kgCO2eq")

# From https://ourworldindata.org/population-growth
ONE_PERCENT_WORLD_POPULATION = 80_000_000

DAYS_IN_YEAR = 365

# Yearly output of one nuclear plant.
# For a 900 MW nuclear plant -> 500 000 MWh / month
# From https://www.edf.fr/groupe-edf/espaces-dedies/jeunes-enseignants/pour-les-jeunes/lenergie-de-a-a-z/produire-de-lelectricite/le-nucleaire-en-chiffres
YEARLY_NUCLEAR_ENERGY_EQ = q("6 TWh")

# Yearly output for a 2MW wind turbine
# https://www.ecologie.gouv.fr/eolien-terrestre
YEARLY_WIND_ENERGY_EQ = q("4.2 GWh")

# Ireland yearly electricity consumption
# From https://en.wikipedia.org/wiki/List_of_countries_by_electricity_consumption
YEARLY_IRELAND_ELECTRICITY_CONSUMPTION = q("33 TWh")
IRELAND_POPULATION_MILLION = 5

# GWP of one Paris -> New York flight (all passengers).
# From https://impactco2.fr/outils/comparateur?value=1&comparisons=&equivalent=avion-pny
AIRPLANE_PARIS_NYC_GWP_EQ = q("177000 kgCO2eq")
|
93 |
-
|
94 |
-
def filter_models(provider, list_models):
    """Placeholder model filter.

    NOTE(review): both arguments are currently ignored and the constant 1 is
    returned — this reads as a stub awaiting a real implementation.
    """
    return 1
|
99 |
-
|
100 |
-
#####################################################################################
|
101 |
-
### IMPACTS FORMATING
|
102 |
-
#####################################################################################
|
103 |
-
|
104 |
-
def format_energy(energy: Energy) -> Quantity:
    """Convert an Energy impact into a pint Quantity.

    Amounts below 1 kWh are rescaled to Wh for readability.
    """
    quantity = q(energy.value, energy.unit)
    if quantity < q("1 kWh"):
        return quantity.to("Wh")
    return quantity
|
109 |
-
|
110 |
-
def format_gwp(gwp: GWP) -> Quantity:
    """Convert a GWP impact into a pint Quantity.

    Amounts below 1 kgCO2eq are rescaled to gCO2eq for readability.
    """
    quantity = q(gwp.value, gwp.unit)
    return quantity.to("gCO2eq") if quantity < q("1 kgCO2eq") else quantity
|
115 |
-
|
116 |
-
def format_adpe(adpe: ADPe) -> Quantity:
    """Convert an ADPe impact into a pint Quantity (no unit rescaling)."""
    quantity = q(adpe.value, adpe.unit)
    return quantity
|
118 |
-
|
119 |
-
def format_pe(pe: PE) -> Quantity:
    """Convert a PE impact into a pint Quantity.

    Amounts below 1 MJ are rescaled to kJ for readability.
    """
    quantity = q(pe.value, pe.unit)
    return quantity.to("kJ") if quantity < q("1 MJ") else quantity
|
124 |
-
|
125 |
-
def format_impacts(impacts: Impacts) -> QImpacts:
    """Format an EcoLogits Impacts object for display.

    Each impact value may be either a scalar or a min/max range object; ranges
    are collapsed in place to their midpoint before unit formatting.

    Args:
        impacts: the impacts object returned by the EcoLogits tracer.

    Returns:
        (QImpacts, usage, embodied): display-ready pint quantities plus the raw
        usage-phase and embodied-phase impact objects.
    """
    # Fixed: the original duplicated the whole return expression inside a
    # try / bare-except, and on a mixed scalar/range object the AttributeError
    # left earlier metrics averaged while later ones kept their raw ranges.
    # Collapse each range independently instead.
    for metric in (impacts.energy, impacts.gwp, impacts.adpe, impacts.pe):
        value = metric.value
        if hasattr(value, "min") and hasattr(value, "max"):
            metric.value = (value.min + value.max) / 2
    return QImpacts(
        energy=format_energy(impacts.energy),
        gwp=format_gwp(impacts.gwp),
        adpe=format_adpe(impacts.adpe),
        pe=format_pe(impacts.pe)
    ), impacts.usage, impacts.embodied
|
145 |
-
|
146 |
-
def split_impacts_u_e(impacts: "Impacts") -> tuple:
    """Split an Impacts object into its (usage, embodied) phase components.

    Fixed: the original return annotation claimed QImpacts, but the function
    returns a 2-tuple of the usage-phase and embodied-phase impact objects.
    """
    return impacts.usage, impacts.embodied
|
148 |
-
|
149 |
-
def average_range_impacts(RangeValue):
    """Collapse a min/max range object to its arithmetic midpoint."""
    midpoint = (RangeValue.min + RangeValue.max) / 2
    return midpoint
|
151 |
-
|
152 |
-
def format_impacts_expert(impacts: Impacts, display_range: bool) -> QImpacts:
    """Format impacts for expert mode.

    Args:
        impacts: impacts object whose values may be min/max range objects.
        display_range: when True, keep the ranges as-is; when False, collapse
            each range to its midpoint before formatting.

    Returns:
        (QImpacts, usage, embodied): display-ready pint quantities plus the raw
        usage-phase and embodied-phase impact objects.
    """
    if not display_range:
        # Fixed: the original built a plain dict / bare floats here and passed
        # them to the format_* helpers, which read `.value` and `.unit`
        # attributes — an AttributeError at runtime. Collapse the ranges on
        # the impact objects themselves instead, as format_impacts does.
        impacts.energy.value = (impacts.energy.value.max + impacts.energy.value.min) / 2
        impacts.gwp.value = (impacts.gwp.value.max + impacts.gwp.value.min) / 2
        impacts.adpe.value = (impacts.adpe.value.max + impacts.adpe.value.min) / 2
        impacts.pe.value = (impacts.pe.value.max + impacts.pe.value.min) / 2
    return QImpacts(
        energy=format_energy(impacts.energy),
        gwp=format_gwp(impacts.gwp),
        adpe=format_adpe(impacts.adpe),
        pe=format_pe(impacts.pe)
    ), impacts.usage, impacts.embodied
|
173 |
-
|
174 |
-
#####################################################################################
|
175 |
-
### EQUIVALENT FORMATING
|
176 |
-
#####################################################################################
|
177 |
-
|
178 |
-
def format_energy_eq_physical_activity(energy: Quantity) -> tuple[PhysicalActivity, Quantity]:
    """Express an energy amount as an equivalent running or walking distance.

    Running is preferred when its equivalent distance exceeds 1 km; otherwise
    the walking equivalent is returned, scaled down to meters when under 1 km.
    """
    in_kj = energy.to("kJ")
    distance_running = in_kj / RUNNING_ENERGY_EQ
    if distance_running > q("1 km"):
        return PhysicalActivity.RUNNING, distance_running

    distance_walking = in_kj / WALKING_ENERGY_EQ
    if distance_walking < q("1 km"):
        distance_walking = distance_walking.to("meter")
    return PhysicalActivity.WALKING, distance_walking
|
188 |
-
|
189 |
-
def format_energy_eq_electric_vehicle(energy: Quantity) -> Quantity:
    """Express an energy amount as the distance an electric vehicle could drive.

    Distances under 1 km are rescaled to meters for readability.
    """
    distance = energy.to("kWh") / EV_ENERGY_EQ
    if distance < q("1 km"):
        distance = distance.to("meter")
    return distance
|
195 |
-
|
196 |
-
def format_gwp_eq_streaming(gwp: Quantity) -> Quantity:
    """Express a GWP amount as an equivalent duration of video streaming.

    The duration is progressively rescaled (hours -> minutes -> seconds) so
    small amounts stay readable.
    """
    duration = gwp.to("kgCO2eq") * STREAMING_GWP_EQ
    if duration < q("1 h"):
        duration = duration.to("min")
    if duration < q("1 min"):
        duration = duration.to("s")
    return duration
|
204 |
-
|
205 |
-
def format_energy_eq_electricity_production(energy: Quantity) -> tuple[EnergyProduction, Quantity]:
    """Scale a per-request energy up to 1% of the world population using it
    daily for a year, and express it in yearly nuclear-plant or wind-turbine
    outputs (nuclear when the total exceeds one plant's yearly production).
    """
    yearly_total = (energy * ONE_PERCENT_WORLD_POPULATION * DAYS_IN_YEAR).to("TWh")
    if yearly_total > YEARLY_NUCLEAR_ENERGY_EQ:
        return EnergyProduction.NUCLEAR, yearly_total / YEARLY_NUCLEAR_ENERGY_EQ
    return EnergyProduction.WIND, yearly_total.to("GWh") / YEARLY_WIND_ENERGY_EQ
|
212 |
-
|
213 |
-
|
214 |
-
def format_energy_eq_electricity_consumption_ireland(energy: Quantity) -> Quantity:
    """Scale a per-request energy up to 1% of the world population using it
    daily for a year, and express it as a fraction of Ireland's yearly
    electricity consumption.
    """
    yearly_total = (energy * ONE_PERCENT_WORLD_POPULATION * DAYS_IN_YEAR).to("TWh")
    return yearly_total / YEARLY_IRELAND_ELECTRICITY_CONSUMPTION
|
218 |
-
|
219 |
-
def format_gwp_eq_airplane_paris_nyc(gwp: Quantity) -> Quantity:
    """Scale a per-request GWP up to 1% of the world population emitting it
    daily for a year, and express it as a number of Paris -> New York flights.
    """
    yearly_total = (gwp * ONE_PERCENT_WORLD_POPULATION * DAYS_IN_YEAR).to("kgCO2eq")
    return yearly_total / AIRPLANE_PARIS_NYC_GWP_EQ
|
223 |
-
|
224 |
-
#####################################################################################
|
225 |
-
### MODELS PARAMETERS
|
226 |
-
#####################################################################################
|
227 |
-
|
228 |
-
def model_active_params_fn(provider_name: str, model_name: str, n_param: float):
    """Return the active parameter count for a model.

    For the sentinel name 'CUSTOM' the user-supplied n_param is returned
    unchanged; otherwise the model is looked up in the EcoLogits repository.

    NOTE(review): the unit of n_param (absolute count vs billions) is not
    visible here — confirm against callers.
    """
    if model_name == 'CUSTOM':
        return n_param
    else:
        model = models.find_model(provider=provider_name, model_name=model_name)

        # NOTE(review): this compares the architecture object directly to the
        # strings 'moe' / 'dense' — presumably the ecologits architecture
        # type supports string equality; confirm, because otherwise both
        # branches are dead and the function falls through returning None.
        if model.architecture == 'moe':
            # Mixture-of-experts: prefer the upper bound of the active-parameter
            # range, then a scalar active count, then the raw parameters object.
            try:
                return model.architecture.parameters.active.max
            except:
                try:
                    return model.architecture.parameters.active
                except:
                    return model.architecture.parameters
        elif model.architecture == 'dense':
            try: #dense with range
                return model.architecture.parameters.max
            except: #dense without range
                return model.architecture.parameters
|
247 |
-
|
248 |
-
def model_total_params_fn(provider_name: str, model_name: str, n_param: float):
    """Return the total parameter count for a model.

    For the sentinel name 'CUSTOM' the user-supplied n_param is returned
    unchanged; otherwise model_name is expected to be '<provider>/<model>'.

    NOTE(review): unlike model_active_params_fn, provider_name is ignored here
    and the provider is re-derived by splitting model_name on '/'. A name
    without '/' raises ValueError — confirm which convention callers use.
    """
    if model_name == 'CUSTOM':
        return n_param
    provider, model_name = model_name.split('/', 1)
    model = models.find_model(provider=provider, model_name=model_name)
    # Fall through the possible parameter shapes: MoE total range, dense range,
    # scalar total, then the raw parameters object.
    try: #moe
        return model.architecture.parameters.total.max
    except:
        try: #dense with range
            return model.architecture.parameters.max
        except: #dense without range
            try:
                return model.architecture.parameters.total
            except:
                return model.architecture.parameters
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
uv.lock
CHANGED
@@ -1,5 +1,4 @@
|
|
1 |
version = 1
|
2 |
-
revision = 1
|
3 |
requires-python = ">=3.12"
|
4 |
|
5 |
[[package]]
|
|
|
1 |
version = 1
|
|
|
2 |
requires-python = ">=3.12"
|
3 |
|
4 |
[[package]]
|