import streamlit as st

st.header("Transformer parameters")

col1, col2 = st.columns([2, 4])
col1.write('Batch size:')
bs = col2.number_input('', value=10, key='bs')
col1.write('Num heads:')
h = col2.number_input('', value=16, key='h')
col1.write('Dimension:')
d = col2.number_input('', value=768, key='d')
col1.write('Seq length:')
n = col2.number_input('', value=1024, key='n')

st.header('Query, Key, Value projection')

# MHA projects the input to Q, K and V, each of width d:
# a (bs*n, d) x (d, 3d) matmul at 2 FLOPs per multiply-add.
# Bytes assume fp16 (2 bytes/element): read the activations
# and the weights, write the projected output.
mha_flop = 2*bs*n*d*3*d
mha_bytes = 2*bs*n*d + 2*3*d*d + 2*bs*n*3*d

st.subheader("Multi-head Attention")
c1, c2 = st.columns([2, 3])
c1.write("FLOP:")
c2.write(str(mha_flop))
c1.write("Bytes:")
c2.write(str(mha_bytes))
c1.write("Arithm. intensity:")
c2.write(str(mha_flop/mha_bytes))

# MQA keeps a full-width Q but shares a single K and a single V
# head of width d/h, shrinking the projected width from 3d to
# (1 + 2/h)d. The same factor applies to the weight and output bytes.
mqa_flop = 2*bs*n*d*(1+2/h)*d
mqa_bytes = 2*bs*n*d + 2*(1+2/h)*d*d + 2*bs*n*(1+2/h)*d

st.subheader("Multi-query Attention")
c1, c2 = st.columns([2, 3])
c1.write("FLOP:")
c2.write(str(mqa_flop))
c1.write("Bytes:")
c2.write(str(mqa_bytes))
c1.write("Arithm. intensity:")
c2.write(str(mqa_flop/mqa_bytes))

st.header('Attention')
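As a quick standalone check of the projection numbers above, the same arithmetic can be written once for an arbitrary number of K/V heads: passing h recovers MHA, passing 1 recovers MQA. This is a minimal sketch, assuming fp16 storage (2 bytes per element) and 2 FLOPs per multiply-add; the helper name projection_intensity is illustrative and not part of the app.

def projection_intensity(bs, h, d, n, kv_heads):
    """Arithmetic intensity of the QKV projection with `kv_heads` K/V heads."""
    width = d + 2 * d * kv_heads / h            # projected width: Q + K + V
    flop = 2 * bs * n * d * width               # (bs*n, d) x (d, width) matmul
    byte = 2*bs*n*d + 2*d*width + 2*bs*n*width  # read input + weights, write output
    return flop / byte

# With the default parameters above, MQA's intensity is well above MHA's:
print('MHA:', projection_intensity(10, 16, 768, 1024, kv_heads=16))
print('MQA:', projection_intensity(10, 16, 768, 1024, kv_heads=1))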