Update app.py
Browse files
app.py
CHANGED
@@ -27,6 +27,7 @@ tokenizer, generator_conf, model = load_model(REPO_NAME)
|
|
27 |
total_params = sum(p.numel() for p in model.parameters())
|
28 |
trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
|
29 |
embed_params = sum(p.numel() for p in model.model.embed_tokens.parameters())*2
|
|
|
30 |
|
31 |
st.markdown(f"*This chat uses the {REPO_NAME} model with {model.get_memory_footprint() / 1e6:.2f} MB memory footprint. ")
|
32 |
|
@@ -34,7 +35,7 @@ st.markdown(f"*This chat uses the {REPO_NAME} model with {model.get_memory_footp
|
|
34 |
# st.markdown(f"Total number of trainable parameters: {trainable_params}. ")
|
35 |
# st.markdown(f"Total number of embed parameters: {embed_params}. ")
|
36 |
|
37 |
- st.markdown(f"Total number of non embedding trainable parameters: {
|
38 |
st.markdown(f"You may ask questions such as 'What is biology?' or 'What is the human body?'*")
|
39 |
|
40 |
try:
|
|
|
27 |
total_params = sum(p.numel() for p in model.parameters())
|
28 |
trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
|
29 |
embed_params = sum(p.numel() for p in model.model.embed_tokens.parameters())*2
|
30 |
+ non_embed_params = (trainable_params - embed_params) / 1e6
|
31 |
|
32 |
st.markdown(f"*This chat uses the {REPO_NAME} model with {model.get_memory_footprint() / 1e6:.2f} MB memory footprint. ")
|
33 |
|
|
|
35 |
# st.markdown(f"Total number of trainable parameters: {trainable_params}. ")
|
36 |
# st.markdown(f"Total number of embed parameters: {embed_params}. ")
|
37 |
|
38 |
+ st.markdown(f"Total number of non embedding trainable parameters: {non_embed_params:.2f} million. ")
|
39 |
st.markdown(f"You may ask questions such as 'What is biology?' or 'What is the human body?'*")
|
40 |
|
41 |
try:
|